VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c@ 42407

Last change on this file since 42407 was 42407, checked in by vboxsync, 12 years ago

VMM: Further work on dealing with hidden segment registers, esp. when going stale.

  • Property svn:eol-style set to native
File size: 195.5 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205#endif /* VBOX */
206
207/* return non-zero on error */
208static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
209 int selector)
210{
211 SegmentCache *dt;
212 int index;
213 target_ulong ptr;
214
215#ifdef VBOX
216 /* Trying to load a selector with RPL=1? */
217 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
218 {
219 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
220 selector = selector & 0xfffc;
221 }
222#endif /* VBOX */
223
224 if (selector & 0x4)
225 dt = &env->ldt;
226 else
227 dt = &env->gdt;
228 index = selector & ~7;
229 if ((index + 7) > dt->limit)
230 return -1;
231 ptr = dt->base + index;
232 *e1_ptr = ldl_kernel(ptr);
233 *e2_ptr = ldl_kernel(ptr + 4);
234 return 0;
235}
236
237static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
238{
239 unsigned int limit;
240 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
241 if (e2 & DESC_G_MASK)
242 limit = (limit << 12) | 0xfff;
243 return limit;
244}
245
246static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
247{
248 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
249}
250
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252{
253 sc->base = get_seg_base(e1, e2);
254 sc->limit = get_seg_limit(e1, e2);
255 sc->flags = e2;
256#ifdef VBOX
257 sc->newselector = 0;
258 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
259#endif
260}
261
262/* init the segment cache in vm86 mode. */
263static inline void load_seg_vm(int seg, int selector)
264{
265 selector &= 0xffff;
266#ifdef VBOX
267 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
268 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
269 flags |= (3 << DESC_DPL_SHIFT);
270
271 cpu_x86_load_seg_cache(env, seg, selector,
272 (selector << 4), 0xffff, flags);
273#else /* VBOX */
274 cpu_x86_load_seg_cache(env, seg, selector,
275 (selector << 4), 0xffff, 0);
276#endif /* VBOX */
277}
278
279static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
280 uint32_t *esp_ptr, int dpl)
281{
282#ifndef VBOX
283 int type, index, shift;
284#else
285 unsigned int type, index, shift;
286#endif
287
288#if 0
289 {
290 int i;
291 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
292 for(i=0;i<env->tr.limit;i++) {
293 printf("%02x ", env->tr.base[i]);
294 if ((i & 7) == 7) printf("\n");
295 }
296 printf("\n");
297 }
298#endif
299
300 if (!(env->tr.flags & DESC_P_MASK))
301 cpu_abort(env, "invalid tss");
302 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
303 if ((type & 7) != 1)
304 cpu_abort(env, "invalid tss type");
305 shift = type >> 3;
306 index = (dpl * 4 + 2) << shift;
307 if (index + (4 << shift) - 1 > env->tr.limit)
308 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
309 if (shift == 0) {
310 *esp_ptr = lduw_kernel(env->tr.base + index);
311 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
312 } else {
313 *esp_ptr = ldl_kernel(env->tr.base + index);
314 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
315 }
316}
317
318/* XXX: merge with load_seg() */
319static void tss_load_seg(int seg_reg, int selector)
320{
321 uint32_t e1, e2;
322 int rpl, dpl, cpl;
323
324#ifdef VBOX
325 e1 = e2 = 0; /* gcc warning? */
326 cpl = env->hflags & HF_CPL_MASK;
327 /* Trying to load a selector with RPL=1? */
328 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
329 {
330 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
331 selector = selector & 0xfffc;
332 }
333#endif /* VBOX */
334
335 if ((selector & 0xfffc) != 0) {
336 if (load_segment(&e1, &e2, selector) != 0)
337 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
338 if (!(e2 & DESC_S_MASK))
339 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
340 rpl = selector & 3;
341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
342 cpl = env->hflags & HF_CPL_MASK;
343 if (seg_reg == R_CS) {
344 if (!(e2 & DESC_CS_MASK))
345 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
346 /* XXX: is it correct ? */
347 if (dpl != rpl)
348 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
349 if ((e2 & DESC_C_MASK) && dpl > rpl)
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 } else if (seg_reg == R_SS) {
352 /* SS must be writable data */
353 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
355 if (dpl != cpl || dpl != rpl)
356 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 } else {
358 /* not readable code */
359 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 /* if data or non-conforming code, check the rights */
362 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
363 if (dpl < cpl || dpl < rpl)
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 }
366 }
367 if (!(e2 & DESC_P_MASK))
368 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
369 cpu_x86_load_seg_cache(env, seg_reg, selector,
370 get_seg_base(e1, e2),
371 get_seg_limit(e1, e2),
372 e2);
373 } else {
374 if (seg_reg == R_SS || seg_reg == R_CS)
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376#ifdef VBOX
377# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
378 cpu_x86_load_seg_cache(env, seg_reg, selector,
379 0, 0, 0);
380# endif
381#endif /* VBOX */
382 }
383}
384
385#define SWITCH_TSS_JMP 0
386#define SWITCH_TSS_IRET 1
387#define SWITCH_TSS_CALL 2
388
389/* XXX: restore CPU state in registers (PowerPC case) */
390static void switch_tss(int tss_selector,
391 uint32_t e1, uint32_t e2, int source,
392 uint32_t next_eip)
393{
394 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
395 target_ulong tss_base;
396 uint32_t new_regs[8], new_segs[6];
397 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
398 uint32_t old_eflags, eflags_mask;
399 SegmentCache *dt;
400#ifndef VBOX
401 int index;
402#else
403 unsigned int index;
404#endif
405 target_ulong ptr;
406
407 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
408 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
409
410 /* if task gate, we read the TSS segment and we load it */
411 if (type == 5) {
412 if (!(e2 & DESC_P_MASK))
413 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
414 tss_selector = e1 >> 16;
415 if (tss_selector & 4)
416 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
417 if (load_segment(&e1, &e2, tss_selector) != 0)
418 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
419 if (e2 & DESC_S_MASK)
420 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
421 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
422 if ((type & 7) != 1)
423 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
424 }
425
426 if (!(e2 & DESC_P_MASK))
427 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
428
429 if (type & 8)
430 tss_limit_max = 103;
431 else
432 tss_limit_max = 43;
433 tss_limit = get_seg_limit(e1, e2);
434 tss_base = get_seg_base(e1, e2);
435 if ((tss_selector & 4) != 0 ||
436 tss_limit < tss_limit_max)
437 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
438 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
439 if (old_type & 8)
440 old_tss_limit_max = 103;
441 else
442 old_tss_limit_max = 43;
443
444 /* read all the registers from the new TSS */
445 if (type & 8) {
446 /* 32 bit */
447 new_cr3 = ldl_kernel(tss_base + 0x1c);
448 new_eip = ldl_kernel(tss_base + 0x20);
449 new_eflags = ldl_kernel(tss_base + 0x24);
450 for(i = 0; i < 8; i++)
451 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
452 for(i = 0; i < 6; i++)
453 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
454 new_ldt = lduw_kernel(tss_base + 0x60);
455 new_trap = ldl_kernel(tss_base + 0x64);
456 } else {
457 /* 16 bit */
458 new_cr3 = 0;
459 new_eip = lduw_kernel(tss_base + 0x0e);
460 new_eflags = lduw_kernel(tss_base + 0x10);
461 for(i = 0; i < 8; i++)
462 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
463 for(i = 0; i < 4; i++)
464 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
465 new_ldt = lduw_kernel(tss_base + 0x2a);
466 new_segs[R_FS] = 0;
467 new_segs[R_GS] = 0;
468 new_trap = 0;
469 }
470
471 /* NOTE: we must avoid memory exceptions during the task switch,
472 so we make dummy accesses beforehand */
473 /* XXX: it can still fail in some cases, so a bigger hack is
474 necessary to validate the TLB after having done the accesses */
475
476 v1 = ldub_kernel(env->tr.base);
477 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
478 stb_kernel(env->tr.base, v1);
479 stb_kernel(env->tr.base + old_tss_limit_max, v2);
480
481 /* clear busy bit (it is restartable) */
482 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
483 target_ulong ptr;
484 uint32_t e2;
485 ptr = env->gdt.base + (env->tr.selector & ~7);
486 e2 = ldl_kernel(ptr + 4);
487 e2 &= ~DESC_TSS_BUSY_MASK;
488 stl_kernel(ptr + 4, e2);
489 }
490 old_eflags = compute_eflags();
491 if (source == SWITCH_TSS_IRET)
492 old_eflags &= ~NT_MASK;
493
494 /* save the current state in the old TSS */
495 if (type & 8) {
496 /* 32 bit */
497 stl_kernel(env->tr.base + 0x20, next_eip);
498 stl_kernel(env->tr.base + 0x24, old_eflags);
499 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
500 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
501 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
502 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
503 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
504 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
505 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
506 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
507 for(i = 0; i < 6; i++)
508 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
509#ifdef VBOX
510 /* Must store the ldt as it gets reloaded and might have been changed. */
511 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
512#endif
513#if defined(VBOX) && defined(DEBUG)
514 printf("TSS 32 bits switch\n");
515 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
516#endif
517 } else {
518 /* 16 bit */
519 stw_kernel(env->tr.base + 0x0e, next_eip);
520 stw_kernel(env->tr.base + 0x10, old_eflags);
521 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
522 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
523 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
524 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
525 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
526 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
527 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
528 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
529 for(i = 0; i < 4; i++)
530 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
531#ifdef VBOX
532 /* Must store the ldt as it gets reloaded and might have been changed. */
533 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
534#endif
535 }
536
537 /* now if an exception occurs, it will occur in the next task
538 context */
539
540 if (source == SWITCH_TSS_CALL) {
541 stw_kernel(tss_base, env->tr.selector);
542 new_eflags |= NT_MASK;
543 }
544
545 /* set busy bit */
546 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
547 target_ulong ptr;
548 uint32_t e2;
549 ptr = env->gdt.base + (tss_selector & ~7);
550 e2 = ldl_kernel(ptr + 4);
551 e2 |= DESC_TSS_BUSY_MASK;
552 stl_kernel(ptr + 4, e2);
553 }
554
555 /* set the new CPU state */
556 /* from this point, any exception which occurs can cause problems */
557 env->cr[0] |= CR0_TS_MASK;
558 env->hflags |= HF_TS_MASK;
559 env->tr.selector = tss_selector;
560 env->tr.base = tss_base;
561 env->tr.limit = tss_limit;
562 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
563#ifdef VBOX
564 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
565 env->tr.newselector = 0;
566#endif
567
568 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
569 cpu_x86_update_cr3(env, new_cr3);
570 }
571
572 /* load all registers without an exception, then reload them with
573 possible exception */
574 env->eip = new_eip;
575 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
576 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
577 if (!(type & 8))
578 eflags_mask &= 0xffff;
579 load_eflags(new_eflags, eflags_mask);
580 /* XXX: what to do in 16 bit case ? */
581 EAX = new_regs[0];
582 ECX = new_regs[1];
583 EDX = new_regs[2];
584 EBX = new_regs[3];
585 ESP = new_regs[4];
586 EBP = new_regs[5];
587 ESI = new_regs[6];
588 EDI = new_regs[7];
589 if (new_eflags & VM_MASK) {
590 for(i = 0; i < 6; i++)
591 load_seg_vm(i, new_segs[i]);
592 /* in vm86, CPL is always 3 */
593 cpu_x86_set_cpl(env, 3);
594 } else {
595 /* CPL is set to the RPL of CS */
596 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
597 /* first just selectors as the rest may trigger exceptions */
598 for(i = 0; i < 6; i++)
599 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
600 }
601
602 env->ldt.selector = new_ldt & ~4;
603 env->ldt.base = 0;
604 env->ldt.limit = 0;
605 env->ldt.flags = 0;
606#ifdef VBOX
607 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
608 env->ldt.newselector = 0;
609#endif
610
611 /* load the LDT */
612 if (new_ldt & 4)
613 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
614
615 if ((new_ldt & 0xfffc) != 0) {
616 dt = &env->gdt;
617 index = new_ldt & ~7;
618 if ((index + 7) > dt->limit)
619 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
620 ptr = dt->base + index;
621 e1 = ldl_kernel(ptr);
622 e2 = ldl_kernel(ptr + 4);
623 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
624 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
625 if (!(e2 & DESC_P_MASK))
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 load_seg_cache_raw_dt(&env->ldt, e1, e2);
628 }
629
630 /* load the segments */
631 if (!(new_eflags & VM_MASK)) {
632 tss_load_seg(R_CS, new_segs[R_CS]);
633 tss_load_seg(R_SS, new_segs[R_SS]);
634 tss_load_seg(R_ES, new_segs[R_ES]);
635 tss_load_seg(R_DS, new_segs[R_DS]);
636 tss_load_seg(R_FS, new_segs[R_FS]);
637 tss_load_seg(R_GS, new_segs[R_GS]);
638 }
639
640 /* check that EIP is in the CS segment limits */
641 if (new_eip > env->segs[R_CS].limit) {
642 /* XXX: different exception if CALL ? */
643 raise_exception_err(EXCP0D_GPF, 0);
644 }
645
646#ifndef CONFIG_USER_ONLY
647 /* reset local breakpoints */
648 if (env->dr[7] & 0x55) {
649 for (i = 0; i < 4; i++) {
650 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
651 hw_breakpoint_remove(env, i);
652 }
653 env->dr[7] &= ~0x55;
654 }
655#endif
656}
657
658/* check if Port I/O is allowed in TSS */
659static inline void check_io(int addr, int size)
660{
661#ifndef VBOX
662 int io_offset, val, mask;
663#else
664 int val, mask;
665 unsigned int io_offset;
666#endif /* VBOX */
667
668 /* TSS must be a valid 32 bit one */
669 if (!(env->tr.flags & DESC_P_MASK) ||
670 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
671 env->tr.limit < 103)
672 goto fail;
673 io_offset = lduw_kernel(env->tr.base + 0x66);
674 io_offset += (addr >> 3);
675 /* Note: the check needs two bytes */
676 if ((io_offset + 1) > env->tr.limit)
677 goto fail;
678 val = lduw_kernel(env->tr.base + io_offset);
679 val >>= (addr & 7);
680 mask = (1 << size) - 1;
681 /* all bits must be zero to allow the I/O */
682 if ((val & mask) != 0) {
683 fail:
684 raise_exception_err(EXCP0D_GPF, 0);
685 }
686}
687
688#ifdef VBOX
689
690/* Keep in sync with gen_check_external_event() */
691void helper_check_external_event()
692{
693 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
694 | CPU_INTERRUPT_EXTERNAL_EXIT
695 | CPU_INTERRUPT_EXTERNAL_TIMER
696 | CPU_INTERRUPT_EXTERNAL_DMA))
697 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
698 && (env->eflags & IF_MASK)
699 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
700 {
701 helper_external_event();
702 }
703
704}
705
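/* VBox: if the hidden parts of this segment register are stale (newselector set), resync them. */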
706void helper_sync_seg(uint32_t reg)
707{
708 if (env->segs[reg].newselector)
709 sync_seg(env, reg, env->segs[reg].newselector);
710}
711
712#endif /* VBOX */
713
714void helper_check_iob(uint32_t t0)
715{
716 check_io(t0, 1);
717}
718
719void helper_check_iow(uint32_t t0)
720{
721 check_io(t0, 2);
722}
723
724void helper_check_iol(uint32_t t0)
725{
726 check_io(t0, 4);
727}
728
729void helper_outb(uint32_t port, uint32_t data)
730{
731#ifndef VBOX
732 cpu_outb(port, data & 0xff);
733#else
734 cpu_outb(env, port, data & 0xff);
735#endif
736}
737
738target_ulong helper_inb(uint32_t port)
739{
740#ifndef VBOX
741 return cpu_inb(port);
742#else
743 return cpu_inb(env, port);
744#endif
745}
746
747void helper_outw(uint32_t port, uint32_t data)
748{
749#ifndef VBOX
750 cpu_outw(port, data & 0xffff);
751#else
752 cpu_outw(env, port, data & 0xffff);
753#endif
754}
755
756target_ulong helper_inw(uint32_t port)
757{
758#ifndef VBOX
759 return cpu_inw(port);
760#else
761 return cpu_inw(env, port);
762#endif
763}
764
765void helper_outl(uint32_t port, uint32_t data)
766{
767#ifndef VBOX
768 cpu_outl(port, data);
769#else
770 cpu_outl(env, port, data);
771#endif
772}
773
774target_ulong helper_inl(uint32_t port)
775{
776#ifndef VBOX
777 return cpu_inl(port);
778#else
779 return cpu_inl(env, port);
780#endif
781}
782
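/* Stack pointer mask implied by the SS descriptor's B bit: 32-bit stack (0xffffffff) vs 16-bit stack (0xffff). */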
783static inline unsigned int get_sp_mask(unsigned int e2)
784{
785 if (e2 & DESC_B_MASK)
786 return 0xffffffff;
787 else
788 return 0xffff;
789}
790
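/* Return 1 if the exception vector pushes an error code (DF, TS, NP, SS, GP, PF, AC). */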
791static int exception_has_error_code(int intno)
792{
793 switch(intno) {
794 case 8:
795 case 10:
796 case 11:
797 case 12:
798 case 13:
799 case 14:
800 case 17:
801 return 1;
802 }
803 return 0;
804}
805
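/* Update ESP honoring the stack address size implied by sp_mask. */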
806#ifdef TARGET_X86_64
807#define SET_ESP(val, sp_mask)\
808do {\
809 if ((sp_mask) == 0xffff)\
810 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
811 else if ((sp_mask) == 0xffffffffLL)\
812 ESP = (uint32_t)(val);\
813 else\
814 ESP = (val);\
815} while (0)
816#else
817#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
818#endif
819
820/* On 64-bit machines this can overflow, so this segment addition macro
821 * can be used to trim the value to 32 bits whenever needed */
822#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
823
824/* XXX: add an is_user flag to have proper security support */
825#define PUSHW(ssp, sp, sp_mask, val)\
826{\
827 sp -= 2;\
828 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
829}
830
831#define PUSHL(ssp, sp, sp_mask, val)\
832{\
833 sp -= 4;\
834 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
835}
836
837#define POPW(ssp, sp, sp_mask, val)\
838{\
839 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
840 sp += 2;\
841}
842
843#define POPL(ssp, sp, sp_mask, val)\
844{\
845 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
846 sp += 4;\
847}
848
849/* protected mode interrupt */
850static void do_interrupt_protected(int intno, int is_int, int error_code,
851 unsigned int next_eip, int is_hw)
852{
853 SegmentCache *dt;
854 target_ulong ptr, ssp;
855 int type, dpl, selector, ss_dpl, cpl;
856 int has_error_code, new_stack, shift;
857 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
858 uint32_t old_eip, sp_mask;
859
860#ifdef VBOX
861 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
862 cpu_loop_exit();
863#endif
864
865 has_error_code = 0;
866 if (!is_int && !is_hw)
867 has_error_code = exception_has_error_code(intno);
868 if (is_int)
869 old_eip = next_eip;
870 else
871 old_eip = env->eip;
872
873 dt = &env->idt;
874#ifndef VBOX
875 if (intno * 8 + 7 > dt->limit)
876#else
877 if ((unsigned)intno * 8 + 7 > dt->limit)
878#endif
879 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
880 ptr = dt->base + intno * 8;
881 e1 = ldl_kernel(ptr);
882 e2 = ldl_kernel(ptr + 4);
883 /* check gate type */
884 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
885 switch(type) {
886 case 5: /* task gate */
887 /* must do that check here to return the correct error code */
888 if (!(e2 & DESC_P_MASK))
889 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
890 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
891 if (has_error_code) {
892 int type;
893 uint32_t mask;
894 /* push the error code */
895 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
896 shift = type >> 3;
897 if (env->segs[R_SS].flags & DESC_B_MASK)
898 mask = 0xffffffff;
899 else
900 mask = 0xffff;
901 esp = (ESP - (2 << shift)) & mask;
902 ssp = env->segs[R_SS].base + esp;
903 if (shift)
904 stl_kernel(ssp, error_code);
905 else
906 stw_kernel(ssp, error_code);
907 SET_ESP(esp, mask);
908 }
909 return;
910 case 6: /* 286 interrupt gate */
911 case 7: /* 286 trap gate */
912 case 14: /* 386 interrupt gate */
913 case 15: /* 386 trap gate */
914 break;
915 default:
916 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
917 break;
918 }
919 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
920 cpl = env->hflags & HF_CPL_MASK;
921 /* check privilege if software int */
922 if (is_int && dpl < cpl)
923 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
924 /* check valid bit */
925 if (!(e2 & DESC_P_MASK))
926 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
927 selector = e1 >> 16;
928 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
929 if ((selector & 0xfffc) == 0)
930 raise_exception_err(EXCP0D_GPF, 0);
931
932 if (load_segment(&e1, &e2, selector) != 0)
933 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
934 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
935 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
936 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
937 if (dpl > cpl)
938 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939 if (!(e2 & DESC_P_MASK))
940 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
941 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
942 /* to inner privilege */
943 get_ss_esp_from_tss(&ss, &esp, dpl);
944 if ((ss & 0xfffc) == 0)
945 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
946 if ((ss & 3) != dpl)
947 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
948 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
949 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
950 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
951 if (ss_dpl != dpl)
952 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
953 if (!(ss_e2 & DESC_S_MASK) ||
954 (ss_e2 & DESC_CS_MASK) ||
955 !(ss_e2 & DESC_W_MASK))
956 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
957 if (!(ss_e2 & DESC_P_MASK))
958#ifdef VBOX /* See page 3-477 of 253666.pdf */
959 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
960#else
961 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
962#endif
963 new_stack = 1;
964 sp_mask = get_sp_mask(ss_e2);
965 ssp = get_seg_base(ss_e1, ss_e2);
966#if defined(VBOX) && defined(DEBUG)
967 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
968#endif
969 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
970 /* to same privilege */
971 if (env->eflags & VM_MASK)
972 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
973 new_stack = 0;
974 sp_mask = get_sp_mask(env->segs[R_SS].flags);
975 ssp = env->segs[R_SS].base;
976 esp = ESP;
977 dpl = cpl;
978 } else {
979 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
980 new_stack = 0; /* avoid warning */
981 sp_mask = 0; /* avoid warning */
982 ssp = 0; /* avoid warning */
983 esp = 0; /* avoid warning */
984 }
985
986 shift = type >> 3;
987
988#if 0
989 /* XXX: check that enough room is available */
990 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
991 if (env->eflags & VM_MASK)
992 push_size += 8;
993 push_size <<= shift;
994#endif
995 if (shift == 1) {
996 if (new_stack) {
997 if (env->eflags & VM_MASK) {
998 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
999 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1000 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1001 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1002 }
1003 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1004 PUSHL(ssp, esp, sp_mask, ESP);
1005 }
1006 PUSHL(ssp, esp, sp_mask, compute_eflags());
1007 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1008 PUSHL(ssp, esp, sp_mask, old_eip);
1009 if (has_error_code) {
1010 PUSHL(ssp, esp, sp_mask, error_code);
1011 }
1012 } else {
1013 if (new_stack) {
1014 if (env->eflags & VM_MASK) {
1015 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1016 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1017 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1018 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1019 }
1020 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1021 PUSHW(ssp, esp, sp_mask, ESP);
1022 }
1023 PUSHW(ssp, esp, sp_mask, compute_eflags());
1024 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1025 PUSHW(ssp, esp, sp_mask, old_eip);
1026 if (has_error_code) {
1027 PUSHW(ssp, esp, sp_mask, error_code);
1028 }
1029 }
1030
1031 if (new_stack) {
1032 if (env->eflags & VM_MASK) {
1033 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1034 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1035 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1036 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1037 }
1038 ss = (ss & ~3) | dpl;
1039 cpu_x86_load_seg_cache(env, R_SS, ss,
1040 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1041 }
1042 SET_ESP(esp, sp_mask);
1043
1044 selector = (selector & ~3) | dpl;
1045 cpu_x86_load_seg_cache(env, R_CS, selector,
1046 get_seg_base(e1, e2),
1047 get_seg_limit(e1, e2),
1048 e2);
1049 cpu_x86_set_cpl(env, dpl);
1050 env->eip = offset;
1051
1052 /* an interrupt gate clears the IF mask */
1053 if ((type & 1) == 0) {
1054 env->eflags &= ~IF_MASK;
1055 }
1056#ifndef VBOX
1057 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1058#else
1059 /*
1060 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1061 * gets confused by seemingly changed EFLAGS. See #3491 and
1062 * public bug #2341.
1063 */
1064 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1065#endif
1066}
1067
1068#ifdef VBOX
1069
1070/* check if VME interrupt redirection is enabled in TSS */
1071DECLINLINE(bool) is_vme_irq_redirected(int intno)
1072{
1073 unsigned int io_offset, intredir_offset;
1074 unsigned char val, mask;
1075
1076 /* TSS must be a valid 32 bit one */
1077 if (!(env->tr.flags & DESC_P_MASK) ||
1078 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1079 env->tr.limit < 103)
1080 goto fail;
1081 io_offset = lduw_kernel(env->tr.base + 0x66);
1082 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1083 if (io_offset < 0x68 + 0x20)
1084 io_offset = 0x68 + 0x20;
1085 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1086 intredir_offset = io_offset - 0x20;
1087
1088 intredir_offset += (intno >> 3);
1089 if ((intredir_offset) > env->tr.limit)
1090 goto fail;
1091
1092 val = ldub_kernel(env->tr.base + intredir_offset);
1093 mask = 1 << (unsigned char)(intno & 7);
1094
1095 /* bit set means no redirection. */
1096 if ((val & mask) != 0) {
1097 return false;
1098 }
1099 return true;
1100
1101fail:
1102 raise_exception_err(EXCP0D_GPF, 0);
1103 return true;
1104}
1105
1106/* V86 mode software interrupt with CR4.VME=1 */
1107static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1108{
1109 target_ulong ptr, ssp;
1110 int selector;
1111 uint32_t offset, esp;
1112 uint32_t old_cs, old_eflags;
1113 uint32_t iopl;
1114
1115 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1116
1117 if (!is_vme_irq_redirected(intno))
1118 {
1119 if (iopl == 3)
1120 {
1121 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1122 return;
1123 }
1124 else
1125 raise_exception_err(EXCP0D_GPF, 0);
1126 }
1127
1128 /* virtual mode idt is at linear address 0 */
1129 ptr = 0 + intno * 4;
1130 offset = lduw_kernel(ptr);
1131 selector = lduw_kernel(ptr + 2);
1132 esp = ESP;
1133 ssp = env->segs[R_SS].base;
1134 old_cs = env->segs[R_CS].selector;
1135
1136 old_eflags = compute_eflags();
1137 if (iopl < 3)
1138 {
1139 /* copy VIF into IF and set IOPL to 3 */
1140 if (env->eflags & VIF_MASK)
1141 old_eflags |= IF_MASK;
1142 else
1143 old_eflags &= ~IF_MASK;
1144
1145 old_eflags |= (3 << IOPL_SHIFT);
1146 }
1147
1148 /* XXX: use SS segment size ? */
1149 PUSHW(ssp, esp, 0xffff, old_eflags);
1150 PUSHW(ssp, esp, 0xffff, old_cs);
1151 PUSHW(ssp, esp, 0xffff, next_eip);
1152
1153 /* update processor state */
1154 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1155 env->eip = offset;
1156 env->segs[R_CS].selector = selector;
1157 env->segs[R_CS].base = (selector << 4);
1158 env->eflags &= ~(TF_MASK | RF_MASK);
1159
1160 if (iopl < 3)
1161 env->eflags &= ~VIF_MASK;
1162 else
1163 env->eflags &= ~IF_MASK;
1164}
1165
1166#endif /* VBOX */
1167
1168#ifdef TARGET_X86_64
1169
1170#define PUSHQ(sp, val)\
1171{\
1172 sp -= 8;\
1173 stq_kernel(sp, (val));\
1174}
1175
1176#define POPQ(sp, val)\
1177{\
1178 val = ldq_kernel(sp);\
1179 sp += 8;\
1180}
1181
1182static inline target_ulong get_rsp_from_tss(int level)
1183{
1184 int index;
1185
1186#if 0
1187 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1188 env->tr.base, env->tr.limit);
1189#endif
1190
1191 if (!(env->tr.flags & DESC_P_MASK))
1192 cpu_abort(env, "invalid tss");
1193 index = 8 * level + 4;
1194 if ((index + 7) > env->tr.limit)
1195 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1196 return ldq_kernel(env->tr.base + index);
1197}
1198
1199/* 64 bit interrupt */
1200static void do_interrupt64(int intno, int is_int, int error_code,
1201 target_ulong next_eip, int is_hw)
1202{
1203 SegmentCache *dt;
1204 target_ulong ptr;
1205 int type, dpl, selector, cpl, ist;
1206 int has_error_code, new_stack;
1207 uint32_t e1, e2, e3, ss;
1208 target_ulong old_eip, esp, offset;
1209
1210#ifdef VBOX
1211 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1212 cpu_loop_exit();
1213#endif
1214
1215 has_error_code = 0;
1216 if (!is_int && !is_hw)
1217 has_error_code = exception_has_error_code(intno);
1218 if (is_int)
1219 old_eip = next_eip;
1220 else
1221 old_eip = env->eip;
1222
1223 dt = &env->idt;
1224 if (intno * 16 + 15 > dt->limit)
1225 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1226 ptr = dt->base + intno * 16;
1227 e1 = ldl_kernel(ptr);
1228 e2 = ldl_kernel(ptr + 4);
1229 e3 = ldl_kernel(ptr + 8);
1230 /* check gate type */
1231 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1232 switch(type) {
1233 case 14: /* 386 interrupt gate */
1234 case 15: /* 386 trap gate */
1235 break;
1236 default:
1237 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1238 break;
1239 }
1240 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1241 cpl = env->hflags & HF_CPL_MASK;
1242 /* check privilege if software int */
1243 if (is_int && dpl < cpl)
1244 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1245 /* check valid bit */
1246 if (!(e2 & DESC_P_MASK))
1247 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1248 selector = e1 >> 16;
1249 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1250 ist = e2 & 7;
1251 if ((selector & 0xfffc) == 0)
1252 raise_exception_err(EXCP0D_GPF, 0);
1253
1254 if (load_segment(&e1, &e2, selector) != 0)
1255 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1256 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1257 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1258 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1259 if (dpl > cpl)
1260 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1261 if (!(e2 & DESC_P_MASK))
1262 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1263 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1264 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1265 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1266 /* to inner privilege */
1267 if (ist != 0)
1268 esp = get_rsp_from_tss(ist + 3);
1269 else
1270 esp = get_rsp_from_tss(dpl);
1271 esp &= ~0xfLL; /* align stack */
1272 ss = 0;
1273 new_stack = 1;
1274 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1275 /* to same privilege */
1276 if (env->eflags & VM_MASK)
1277 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1278 new_stack = 0;
1279 if (ist != 0)
1280 esp = get_rsp_from_tss(ist + 3);
1281 else
1282 esp = ESP;
1283 esp &= ~0xfLL; /* align stack */
1284 dpl = cpl;
1285 } else {
1286 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1287 new_stack = 0; /* avoid warning */
1288 esp = 0; /* avoid warning */
1289 }
1290
1291 PUSHQ(esp, env->segs[R_SS].selector);
1292 PUSHQ(esp, ESP);
1293 PUSHQ(esp, compute_eflags());
1294 PUSHQ(esp, env->segs[R_CS].selector);
1295 PUSHQ(esp, old_eip);
1296 if (has_error_code) {
1297 PUSHQ(esp, error_code);
1298 }
1299
1300 if (new_stack) {
1301 ss = 0 | dpl;
1302 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1303 }
1304 ESP = esp;
1305
1306 selector = (selector & ~3) | dpl;
1307 cpu_x86_load_seg_cache(env, R_CS, selector,
1308 get_seg_base(e1, e2),
1309 get_seg_limit(e1, e2),
1310 e2);
1311 cpu_x86_set_cpl(env, dpl);
1312 env->eip = offset;
1313
1314 /* an interrupt gate clears the IF mask */
1315 if ((type & 1) == 0) {
1316 env->eflags &= ~IF_MASK;
1317 }
1318#ifndef VBOX
1319 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1320#else /* VBOX */
1321 /*
1322 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1323 * gets confused by seemingly changed EFLAGS. See #3491 and
1324 * public bug #2341.
1325 */
1326 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1327#endif /* VBOX */
1328}
1329#endif
1330
1331#ifdef TARGET_X86_64
1332#if defined(CONFIG_USER_ONLY)
1333void helper_syscall(int next_eip_addend)
1334{
1335 env->exception_index = EXCP_SYSCALL;
1336 env->exception_next_eip = env->eip + next_eip_addend;
1337 cpu_loop_exit();
1338}
1339#else
1340void helper_syscall(int next_eip_addend)
1341{
1342 int selector;
1343
1344 if (!(env->efer & MSR_EFER_SCE)) {
1345 raise_exception_err(EXCP06_ILLOP, 0);
1346 }
1347 selector = (env->star >> 32) & 0xffff;
1348 if (env->hflags & HF_LMA_MASK) {
1349 int code64;
1350
1351 ECX = env->eip + next_eip_addend;
1352 env->regs[11] = compute_eflags();
1353
1354 code64 = env->hflags & HF_CS64_MASK;
1355
1356 cpu_x86_set_cpl(env, 0);
1357 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1358 0, 0xffffffff,
1359 DESC_G_MASK | DESC_P_MASK |
1360 DESC_S_MASK |
1361 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1362 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1363 0, 0xffffffff,
1364 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1365 DESC_S_MASK |
1366 DESC_W_MASK | DESC_A_MASK);
1367 env->eflags &= ~env->fmask;
1368 load_eflags(env->eflags, 0);
1369 if (code64)
1370 env->eip = env->lstar;
1371 else
1372 env->eip = env->cstar;
1373 } else {
1374 ECX = (uint32_t)(env->eip + next_eip_addend);
1375
1376 cpu_x86_set_cpl(env, 0);
1377 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1378 0, 0xffffffff,
1379 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1380 DESC_S_MASK |
1381 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1382 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1383 0, 0xffffffff,
1384 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1385 DESC_S_MASK |
1386 DESC_W_MASK | DESC_A_MASK);
1387 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1388 env->eip = (uint32_t)env->star;
1389 }
1390}
1391#endif
1392#endif
1393
1394#ifdef TARGET_X86_64
1395void helper_sysret(int dflag)
1396{
1397 int cpl, selector;
1398
1399 if (!(env->efer & MSR_EFER_SCE)) {
1400 raise_exception_err(EXCP06_ILLOP, 0);
1401 }
1402 cpl = env->hflags & HF_CPL_MASK;
1403 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1404 raise_exception_err(EXCP0D_GPF, 0);
1405 }
1406 selector = (env->star >> 48) & 0xffff;
1407 if (env->hflags & HF_LMA_MASK) {
1408 if (dflag == 2) {
1409 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1410 0, 0xffffffff,
1411 DESC_G_MASK | DESC_P_MASK |
1412 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1413 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1414 DESC_L_MASK);
1415 env->eip = ECX;
1416 } else {
1417 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1418 0, 0xffffffff,
1419 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1420 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1421 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1422 env->eip = (uint32_t)ECX;
1423 }
1424 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1425 0, 0xffffffff,
1426 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1427 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1428 DESC_W_MASK | DESC_A_MASK);
1429 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1430 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1431 cpu_x86_set_cpl(env, 3);
1432 } else {
1433 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1434 0, 0xffffffff,
1435 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1436 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1437 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1438 env->eip = (uint32_t)ECX;
1439 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1440 0, 0xffffffff,
1441 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1442 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1443 DESC_W_MASK | DESC_A_MASK);
1444 env->eflags |= IF_MASK;
1445 cpu_x86_set_cpl(env, 3);
1446 }
1447}
1448#endif
1449
1450#ifdef VBOX
1451
1452/**
1453 * Checks and processes external VMM events.
1454 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1455 */
1456void helper_external_event(void)
1457{
1458# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1459 uintptr_t uSP;
1460# ifdef RT_ARCH_AMD64
1461 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1462# else
1463 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1464# endif
1465 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1466# endif
1467 /* Keep in sync with flags checked by gen_check_external_event() */
1468 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1469 {
1470 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1471 ~CPU_INTERRUPT_EXTERNAL_HARD);
1472 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1473 }
1474 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1475 {
1476 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1477 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1478 cpu_exit(env);
1479 }
1480 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1481 {
1482 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1483 ~CPU_INTERRUPT_EXTERNAL_DMA);
1484 remR3DmaRun(env);
1485 }
1486 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1487 {
1488 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1489 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1490 remR3TimersRun(env);
1491 }
1492 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1493 {
1494 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1495 ~CPU_INTERRUPT_EXTERNAL_HARD);
1496 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1497 }
1498}
1499
1500/* helper for recording call instruction addresses for later scanning */
1501void helper_record_call()
1502{
1503 if ( !(env->state & CPU_RAW_RING0)
1504 && (env->cr[0] & CR0_PG_MASK)
1505 && !(env->eflags & X86_EFL_IF))
1506 remR3RecordCall(env);
1507}
1508
1509#endif /* VBOX */
1510
1511/* real mode interrupt */
1512static void do_interrupt_real(int intno, int is_int, int error_code,
1513 unsigned int next_eip)
1514{
1515 SegmentCache *dt;
1516 target_ulong ptr, ssp;
1517 int selector;
1518 uint32_t offset, esp;
1519 uint32_t old_cs, old_eip;
1520
1521 /* real mode (simpler !) */
1522 dt = &env->idt;
1523#ifndef VBOX
1524 if (intno * 4 + 3 > dt->limit)
1525#else
1526 if ((unsigned)intno * 4 + 3 > dt->limit)
1527#endif
1528 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1529 ptr = dt->base + intno * 4;
1530 offset = lduw_kernel(ptr);
1531 selector = lduw_kernel(ptr + 2);
1532 esp = ESP;
1533 ssp = env->segs[R_SS].base;
1534 if (is_int)
1535 old_eip = next_eip;
1536 else
1537 old_eip = env->eip;
1538 old_cs = env->segs[R_CS].selector;
1539 /* XXX: use SS segment size ? */
1540 PUSHW(ssp, esp, 0xffff, compute_eflags());
1541 PUSHW(ssp, esp, 0xffff, old_cs);
1542 PUSHW(ssp, esp, 0xffff, old_eip);
1543
1544 /* update processor state */
1545 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1546 env->eip = offset;
1547 env->segs[R_CS].selector = selector;
1548 env->segs[R_CS].base = (selector << 4);
1549 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1550}
1551
1552/* fake user mode interrupt */
1553void do_interrupt_user(int intno, int is_int, int error_code,
1554 target_ulong next_eip)
1555{
1556 SegmentCache *dt;
1557 target_ulong ptr;
1558 int dpl, cpl, shift;
1559 uint32_t e2;
1560
1561 dt = &env->idt;
1562 if (env->hflags & HF_LMA_MASK) {
1563 shift = 4;
1564 } else {
1565 shift = 3;
1566 }
1567 ptr = dt->base + (intno << shift);
1568 e2 = ldl_kernel(ptr + 4);
1569
1570 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1571 cpl = env->hflags & HF_CPL_MASK;
1572 /* check privilege if software int */
1573 if (is_int && dpl < cpl)
1574 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1575
1576 /* Since we emulate only user space, we cannot do more than
1577 exit the emulation with the suitable exception and error
1578 code */
1579 if (is_int)
1580 EIP = next_eip;
1581}
1582
1583#if !defined(CONFIG_USER_ONLY)
1584static void handle_even_inj(int intno, int is_int, int error_code,
1585 int is_hw, int rm)
1586{
1587 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1588 if (!(event_inj & SVM_EVTINJ_VALID)) {
1589 int type;
1590 if (is_int)
1591 type = SVM_EVTINJ_TYPE_SOFT;
1592 else
1593 type = SVM_EVTINJ_TYPE_EXEPT;
1594 event_inj = intno | type | SVM_EVTINJ_VALID;
1595 if (!rm && exception_has_error_code(intno)) {
1596 event_inj |= SVM_EVTINJ_VALID_ERR;
1597 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1598 }
1599 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1600 }
1601}
1602#endif
1603
1604/*
1605 * Begin execution of an interruption. is_int is TRUE if coming from
1606 * the int instruction. next_eip is the EIP value AFTER the interrupt
1607 * instruction. It is only relevant if is_int is TRUE.
1608 */
1609void do_interrupt(int intno, int is_int, int error_code,
1610 target_ulong next_eip, int is_hw)
1611{
1612 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1613 if ((env->cr[0] & CR0_PE_MASK)) {
1614 static int count;
1615 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1616 count, intno, error_code, is_int,
1617 env->hflags & HF_CPL_MASK,
1618 env->segs[R_CS].selector, EIP,
1619 (int)env->segs[R_CS].base + EIP,
1620 env->segs[R_SS].selector, ESP);
1621 if (intno == 0x0e) {
1622 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1623 } else {
1624 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1625 }
1626 qemu_log("\n");
1627 log_cpu_state(env, X86_DUMP_CCOP);
1628#if 0
1629 {
1630 int i;
1631 uint8_t *ptr;
1632 qemu_log(" code=");
1633 ptr = env->segs[R_CS].base + env->eip;
1634 for(i = 0; i < 16; i++) {
1635 qemu_log(" %02x", ldub(ptr + i));
1636 }
1637 qemu_log("\n");
1638 }
1639#endif
1640 count++;
1641 }
1642 }
1643#ifdef VBOX
1644 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1645 if (is_int) {
1646 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1647 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1648 } else {
1649 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1650 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1651 }
1652 }
1653#endif
1654 if (env->cr[0] & CR0_PE_MASK) {
1655#if !defined(CONFIG_USER_ONLY)
1656 if (env->hflags & HF_SVMI_MASK)
1657 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1658#endif
1659#ifdef TARGET_X86_64
1660 if (env->hflags & HF_LMA_MASK) {
1661 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1662 } else
1663#endif
1664 {
1665#ifdef VBOX
1666 /* int xx *, v86 code and VME enabled? */
1667 if ( (env->eflags & VM_MASK)
1668 && (env->cr[4] & CR4_VME_MASK)
1669 && is_int
1670 && !is_hw
1671 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1672 )
1673 do_soft_interrupt_vme(intno, error_code, next_eip);
1674 else
1675#endif /* VBOX */
1676 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1677 }
1678 } else {
1679#if !defined(CONFIG_USER_ONLY)
1680 if (env->hflags & HF_SVMI_MASK)
1681 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1682#endif
1683 do_interrupt_real(intno, is_int, error_code, next_eip);
1684 }
1685
1686#if !defined(CONFIG_USER_ONLY)
1687 if (env->hflags & HF_SVMI_MASK) {
1688 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1689 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1690 }
1691#endif
1692}
1693
1694/* This should come from sysemu.h - if we could include it here... */
1695void qemu_system_reset_request(void);
1696
1697/*
1698 * Check nested exceptions and change to double or triple fault if
1699 * needed. It should only be called if this is not an interrupt.
1700 * Returns the new exception number.
1701 */
1702static int check_exception(int intno, int *error_code)
1703{
1704 int first_contributory = env->old_exception == 0 ||
1705 (env->old_exception >= 10 &&
1706 env->old_exception <= 13);
1707 int second_contributory = intno == 0 ||
1708 (intno >= 10 && intno <= 13);
1709
1710 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1711 env->old_exception, intno);
1712
1713#if !defined(CONFIG_USER_ONLY)
1714 if (env->old_exception == EXCP08_DBLE) {
1715 if (env->hflags & HF_SVMI_MASK)
1716 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1717
1718 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1719
1720# ifndef VBOX
1721 qemu_system_reset_request();
1722# else
1723 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1724# endif
1725 return EXCP_HLT;
1726 }
1727#endif
1728
1729 if ((first_contributory && second_contributory)
1730 || (env->old_exception == EXCP0E_PAGE &&
1731 (second_contributory || (intno == EXCP0E_PAGE)))) {
1732 intno = EXCP08_DBLE;
1733 *error_code = 0;
1734 }
1735
1736 if (second_contributory || (intno == EXCP0E_PAGE) ||
1737 (intno == EXCP08_DBLE))
1738 env->old_exception = intno;
1739
1740 return intno;
1741}
1742
1743/*
1744 * Signal an interruption. It is executed in the main CPU loop.
1745 * is_int is TRUE if coming from the int instruction. next_eip is the
1746 * EIP value AFTER the interrupt instruction. It is only relevant if
1747 * is_int is TRUE.
1748 */
1749static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1750 int next_eip_addend)
1751{
1752#if defined(VBOX) && defined(DEBUG)
1753 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1754#endif
1755 if (!is_int) {
1756 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1757 intno = check_exception(intno, &error_code);
1758 } else {
1759 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1760 }
1761
1762 env->exception_index = intno;
1763 env->error_code = error_code;
1764 env->exception_is_int = is_int;
1765 env->exception_next_eip = env->eip + next_eip_addend;
1766 cpu_loop_exit();
1767}
1768
1769/* shortcuts to generate exceptions */
1770
1771void raise_exception_err(int exception_index, int error_code)
1772{
1773 raise_interrupt(exception_index, 0, error_code, 0);
1774}
1775
1776void raise_exception(int exception_index)
1777{
1778 raise_interrupt(exception_index, 0, 0, 0);
1779}
1780
1781void raise_exception_env(int exception_index, CPUState *nenv)
1782{
1783 env = nenv;
1784 raise_exception(exception_index);
1785}
1786/* SMM support */
1787
1788#if defined(CONFIG_USER_ONLY)
1789
1790void do_smm_enter(void)
1791{
1792}
1793
1794void helper_rsm(void)
1795{
1796}
1797
1798#else
1799
1800#ifdef TARGET_X86_64
1801#define SMM_REVISION_ID 0x00020064
1802#else
1803#define SMM_REVISION_ID 0x00020000
1804#endif
1805
1806void do_smm_enter(void)
1807{
1808 target_ulong sm_state;
1809 SegmentCache *dt;
1810 int i, offset;
1811
1812 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1813 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1814
1815 env->hflags |= HF_SMM_MASK;
1816 cpu_smm_update(env);
1817
1818 sm_state = env->smbase + 0x8000;
1819
1820#ifdef TARGET_X86_64
1821 for(i = 0; i < 6; i++) {
1822 dt = &env->segs[i];
1823 offset = 0x7e00 + i * 16;
1824 stw_phys(sm_state + offset, dt->selector);
1825 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1826 stl_phys(sm_state + offset + 4, dt->limit);
1827 stq_phys(sm_state + offset + 8, dt->base);
1828 }
1829
1830 stq_phys(sm_state + 0x7e68, env->gdt.base);
1831 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1832
1833 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1834 stq_phys(sm_state + 0x7e78, env->ldt.base);
1835 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1836 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1837
1838 stq_phys(sm_state + 0x7e88, env->idt.base);
1839 stl_phys(sm_state + 0x7e84, env->idt.limit);
1840
1841 stw_phys(sm_state + 0x7e90, env->tr.selector);
1842 stq_phys(sm_state + 0x7e98, env->tr.base);
1843 stl_phys(sm_state + 0x7e94, env->tr.limit);
1844 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1845
1846 stq_phys(sm_state + 0x7ed0, env->efer);
1847
1848 stq_phys(sm_state + 0x7ff8, EAX);
1849 stq_phys(sm_state + 0x7ff0, ECX);
1850 stq_phys(sm_state + 0x7fe8, EDX);
1851 stq_phys(sm_state + 0x7fe0, EBX);
1852 stq_phys(sm_state + 0x7fd8, ESP);
1853 stq_phys(sm_state + 0x7fd0, EBP);
1854 stq_phys(sm_state + 0x7fc8, ESI);
1855 stq_phys(sm_state + 0x7fc0, EDI);
1856 for(i = 8; i < 16; i++)
1857 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1858 stq_phys(sm_state + 0x7f78, env->eip);
1859 stl_phys(sm_state + 0x7f70, compute_eflags());
1860 stl_phys(sm_state + 0x7f68, env->dr[6]);
1861 stl_phys(sm_state + 0x7f60, env->dr[7]);
1862
1863 stl_phys(sm_state + 0x7f48, env->cr[4]);
1864 stl_phys(sm_state + 0x7f50, env->cr[3]);
1865 stl_phys(sm_state + 0x7f58, env->cr[0]);
1866
1867 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1868 stl_phys(sm_state + 0x7f00, env->smbase);
1869#else
1870 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1871 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1872 stl_phys(sm_state + 0x7ff4, compute_eflags());
1873 stl_phys(sm_state + 0x7ff0, env->eip);
1874 stl_phys(sm_state + 0x7fec, EDI);
1875 stl_phys(sm_state + 0x7fe8, ESI);
1876 stl_phys(sm_state + 0x7fe4, EBP);
1877 stl_phys(sm_state + 0x7fe0, ESP);
1878 stl_phys(sm_state + 0x7fdc, EBX);
1879 stl_phys(sm_state + 0x7fd8, EDX);
1880 stl_phys(sm_state + 0x7fd4, ECX);
1881 stl_phys(sm_state + 0x7fd0, EAX);
1882 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1883 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1884
1885 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1886 stl_phys(sm_state + 0x7f64, env->tr.base);
1887 stl_phys(sm_state + 0x7f60, env->tr.limit);
1888 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1889
1890 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1891 stl_phys(sm_state + 0x7f80, env->ldt.base);
1892 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1893 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1894
1895 stl_phys(sm_state + 0x7f74, env->gdt.base);
1896 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1897
1898 stl_phys(sm_state + 0x7f58, env->idt.base);
1899 stl_phys(sm_state + 0x7f54, env->idt.limit);
1900
1901 for(i = 0; i < 6; i++) {
1902 dt = &env->segs[i];
1903 if (i < 3)
1904 offset = 0x7f84 + i * 12;
1905 else
1906 offset = 0x7f2c + (i - 3) * 12;
1907 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1908 stl_phys(sm_state + offset + 8, dt->base);
1909 stl_phys(sm_state + offset + 4, dt->limit);
1910 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1911 }
1912 stl_phys(sm_state + 0x7f14, env->cr[4]);
1913
1914 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1915 stl_phys(sm_state + 0x7ef8, env->smbase);
1916#endif
1917 /* init SMM cpu state */
1918
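    /* Architecturally, SMM entry resembles a reset: protection and paging
       are switched off, the segment registers get flat 4GiB limits, and
       execution resumes at SMBASE + 0x8000. */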
1919#ifdef TARGET_X86_64
1920 cpu_load_efer(env, 0);
1921#endif
1922 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1923 env->eip = 0x00008000;
1924 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1925 0xffffffff, 0);
1926 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1927 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1928 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1929 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1930 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1931
1932 cpu_x86_update_cr0(env,
1933 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1934 cpu_x86_update_cr4(env, 0);
1935 env->dr[7] = 0x00000400;
1936 CC_OP = CC_OP_EFLAGS;
1937}
1938
1939void helper_rsm(void)
1940{
1941#ifdef VBOX
1942 cpu_abort(env, "helper_rsm");
1943#else /* !VBOX */
1944 target_ulong sm_state;
1945 int i, offset;
1946 uint32_t val;
1947
1948 sm_state = env->smbase + 0x8000;
1949#ifdef TARGET_X86_64
1950 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1951
1952 for(i = 0; i < 6; i++) {
1953 offset = 0x7e00 + i * 16;
1954 cpu_x86_load_seg_cache(env, i,
1955 lduw_phys(sm_state + offset),
1956 ldq_phys(sm_state + offset + 8),
1957 ldl_phys(sm_state + offset + 4),
1958 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1959 }
1960
1961 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1962 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1963
1964 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1965 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1966 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1967 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1968#ifdef VBOX
1969 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1970 env->ldt.newselector = 0;
1971#endif
1972
1973 env->idt.base = ldq_phys(sm_state + 0x7e88);
1974 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1975
1976 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1977 env->tr.base = ldq_phys(sm_state + 0x7e98);
1978 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1979 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1980#ifdef VBOX
1981 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
1982 env->tr.newselector = 0;
1983#endif
1984
1985 EAX = ldq_phys(sm_state + 0x7ff8);
1986 ECX = ldq_phys(sm_state + 0x7ff0);
1987 EDX = ldq_phys(sm_state + 0x7fe8);
1988 EBX = ldq_phys(sm_state + 0x7fe0);
1989 ESP = ldq_phys(sm_state + 0x7fd8);
1990 EBP = ldq_phys(sm_state + 0x7fd0);
1991 ESI = ldq_phys(sm_state + 0x7fc8);
1992 EDI = ldq_phys(sm_state + 0x7fc0);
1993 for(i = 8; i < 16; i++)
1994 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1995 env->eip = ldq_phys(sm_state + 0x7f78);
1996 load_eflags(ldl_phys(sm_state + 0x7f70),
1997 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1998 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1999 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2000
2001 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2002 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2003 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2004
2005 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2006 if (val & 0x20000) {
2007 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2008 }
2009#else
2010 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2011 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2012 load_eflags(ldl_phys(sm_state + 0x7ff4),
2013 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2014 env->eip = ldl_phys(sm_state + 0x7ff0);
2015 EDI = ldl_phys(sm_state + 0x7fec);
2016 ESI = ldl_phys(sm_state + 0x7fe8);
2017 EBP = ldl_phys(sm_state + 0x7fe4);
2018 ESP = ldl_phys(sm_state + 0x7fe0);
2019 EBX = ldl_phys(sm_state + 0x7fdc);
2020 EDX = ldl_phys(sm_state + 0x7fd8);
2021 ECX = ldl_phys(sm_state + 0x7fd4);
2022 EAX = ldl_phys(sm_state + 0x7fd0);
2023 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2024 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2025
2026 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2027 env->tr.base = ldl_phys(sm_state + 0x7f64);
2028 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2029 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2030#ifdef VBOX
2031 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2032 env->tr.newselector = 0;
2033#endif
2034
2035 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2036 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2037 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2038 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2039#ifdef VBOX
2040 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2041 env->ldt.newselector = 0;
2042#endif
2043
2044 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2045 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2046
2047 env->idt.base = ldl_phys(sm_state + 0x7f58);
2048 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2049
2050 for(i = 0; i < 6; i++) {
2051 if (i < 3)
2052 offset = 0x7f84 + i * 12;
2053 else
2054 offset = 0x7f2c + (i - 3) * 12;
2055 cpu_x86_load_seg_cache(env, i,
2056 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2057 ldl_phys(sm_state + offset + 8),
2058 ldl_phys(sm_state + offset + 4),
2059 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2060 }
2061 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2062
2063 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2064 if (val & 0x20000) {
2065 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2066 }
2067#endif
2068 CC_OP = CC_OP_EFLAGS;
2069 env->hflags &= ~HF_SMM_MASK;
2070 cpu_smm_update(env);
2071
2072 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2073 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2074#endif /* !VBOX */
2075}
2076
2077#endif /* !CONFIG_USER_ONLY */
2078
2079
2080/* division, flags are undefined */
2081
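/* Note: the hardware raises #DE (EXCP00_DIVZ here) both for a zero divisor
   and for a quotient that overflows the destination register, which is why
   the overflow checks below reuse the same exception. Example for DIV r/m8:
   AX=0x0123, divisor=0x10 -> AL=0x12 (quotient), AH=0x03 (remainder). */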
2082void helper_divb_AL(target_ulong t0)
2083{
2084 unsigned int num, den, q, r;
2085
2086 num = (EAX & 0xffff);
2087 den = (t0 & 0xff);
2088 if (den == 0) {
2089 raise_exception(EXCP00_DIVZ);
2090 }
2091 q = (num / den);
2092 if (q > 0xff)
2093 raise_exception(EXCP00_DIVZ);
2094 q &= 0xff;
2095 r = (num % den) & 0xff;
2096 EAX = (EAX & ~0xffff) | (r << 8) | q;
2097}
2098
2099void helper_idivb_AL(target_ulong t0)
2100{
2101 int num, den, q, r;
2102
2103 num = (int16_t)EAX;
2104 den = (int8_t)t0;
2105 if (den == 0) {
2106 raise_exception(EXCP00_DIVZ);
2107 }
2108 q = (num / den);
2109 if (q != (int8_t)q)
2110 raise_exception(EXCP00_DIVZ);
2111 q &= 0xff;
2112 r = (num % den) & 0xff;
2113 EAX = (EAX & ~0xffff) | (r << 8) | q;
2114}
2115
2116void helper_divw_AX(target_ulong t0)
2117{
2118 unsigned int num, den, q, r;
2119
2120 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2121 den = (t0 & 0xffff);
2122 if (den == 0) {
2123 raise_exception(EXCP00_DIVZ);
2124 }
2125 q = (num / den);
2126 if (q > 0xffff)
2127 raise_exception(EXCP00_DIVZ);
2128 q &= 0xffff;
2129 r = (num % den) & 0xffff;
2130 EAX = (EAX & ~0xffff) | q;
2131 EDX = (EDX & ~0xffff) | r;
2132}
2133
2134void helper_idivw_AX(target_ulong t0)
2135{
2136 int num, den, q, r;
2137
2138 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2139 den = (int16_t)t0;
2140 if (den == 0) {
2141 raise_exception(EXCP00_DIVZ);
2142 }
2143 q = (num / den);
2144 if (q != (int16_t)q)
2145 raise_exception(EXCP00_DIVZ);
2146 q &= 0xffff;
2147 r = (num % den) & 0xffff;
2148 EAX = (EAX & ~0xffff) | q;
2149 EDX = (EDX & ~0xffff) | r;
2150}
2151
2152void helper_divl_EAX(target_ulong t0)
2153{
2154 unsigned int den, r;
2155 uint64_t num, q;
2156
2157 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2158 den = t0;
2159 if (den == 0) {
2160 raise_exception(EXCP00_DIVZ);
2161 }
2162 q = (num / den);
2163 r = (num % den);
2164 if (q > 0xffffffff)
2165 raise_exception(EXCP00_DIVZ);
2166 EAX = (uint32_t)q;
2167 EDX = (uint32_t)r;
2168}
2169
2170void helper_idivl_EAX(target_ulong t0)
2171{
2172 int den, r;
2173 int64_t num, q;
2174
2175 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2176 den = t0;
2177 if (den == 0) {
2178 raise_exception(EXCP00_DIVZ);
2179 }
2180 q = (num / den);
2181 r = (num % den);
2182 if (q != (int32_t)q)
2183 raise_exception(EXCP00_DIVZ);
2184 EAX = (uint32_t)q;
2185 EDX = (uint32_t)r;
2186}
2187
2188/* bcd */
2189
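/* ASCII/BCD adjustment helpers. Architecturally, AAM with a zero immediate
   raises #DE; as the XXX below notes, that case is not handled here. */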
2190/* XXX: exception */
2191void helper_aam(int base)
2192{
2193 int al, ah;
2194 al = EAX & 0xff;
2195 ah = al / base;
2196 al = al % base;
2197 EAX = (EAX & ~0xffff) | al | (ah << 8);
2198 CC_DST = al;
2199}
2200
2201void helper_aad(int base)
2202{
2203 int al, ah;
2204 al = EAX & 0xff;
2205 ah = (EAX >> 8) & 0xff;
2206 al = ((ah * base) + al) & 0xff;
2207 EAX = (EAX & ~0xffff) | al;
2208 CC_DST = al;
2209}
2210
2211void helper_aaa(void)
2212{
2213 int icarry;
2214 int al, ah, af;
2215 int eflags;
2216
2217 eflags = helper_cc_compute_all(CC_OP);
2218 af = eflags & CC_A;
2219 al = EAX & 0xff;
2220 ah = (EAX >> 8) & 0xff;
2221
2222 icarry = (al > 0xf9);
2223 if (((al & 0x0f) > 9 ) || af) {
2224 al = (al + 6) & 0x0f;
2225 ah = (ah + 1 + icarry) & 0xff;
2226 eflags |= CC_C | CC_A;
2227 } else {
2228 eflags &= ~(CC_C | CC_A);
2229 al &= 0x0f;
2230 }
2231 EAX = (EAX & ~0xffff) | al | (ah << 8);
2232 CC_SRC = eflags;
2233}
2234
2235void helper_aas(void)
2236{
2237 int icarry;
2238 int al, ah, af;
2239 int eflags;
2240
2241 eflags = helper_cc_compute_all(CC_OP);
2242 af = eflags & CC_A;
2243 al = EAX & 0xff;
2244 ah = (EAX >> 8) & 0xff;
2245
2246 icarry = (al < 6);
2247 if (((al & 0x0f) > 9 ) || af) {
2248 al = (al - 6) & 0x0f;
2249 ah = (ah - 1 - icarry) & 0xff;
2250 eflags |= CC_C | CC_A;
2251 } else {
2252 eflags &= ~(CC_C | CC_A);
2253 al &= 0x0f;
2254 }
2255 EAX = (EAX & ~0xffff) | al | (ah << 8);
2256 CC_SRC = eflags;
2257}
2258
2259void helper_daa(void)
2260{
2261 int al, af, cf;
2262 int eflags;
2263
2264 eflags = helper_cc_compute_all(CC_OP);
2265 cf = eflags & CC_C;
2266 af = eflags & CC_A;
2267 al = EAX & 0xff;
2268
2269 eflags = 0;
2270 if (((al & 0x0f) > 9 ) || af) {
2271 al = (al + 6) & 0xff;
2272 eflags |= CC_A;
2273 }
2274 if ((al > 0x9f) || cf) {
2275 al = (al + 0x60) & 0xff;
2276 eflags |= CC_C;
2277 }
2278 EAX = (EAX & ~0xff) | al;
2279 /* well, speed is not an issue here, so we compute the flags by hand */
2280 eflags |= (al == 0) << 6; /* zf */
2281 eflags |= parity_table[al]; /* pf */
2282 eflags |= (al & 0x80); /* sf */
2283 CC_SRC = eflags;
2284}
2285
2286void helper_das(void)
2287{
2288 int al, al1, af, cf;
2289 int eflags;
2290
2291 eflags = helper_cc_compute_all(CC_OP);
2292 cf = eflags & CC_C;
2293 af = eflags & CC_A;
2294 al = EAX & 0xff;
2295
2296 eflags = 0;
2297 al1 = al;
2298 if (((al & 0x0f) > 9 ) || af) {
2299 eflags |= CC_A;
2300 if (al < 6 || cf)
2301 eflags |= CC_C;
2302 al = (al - 6) & 0xff;
2303 }
2304 if ((al1 > 0x99) || cf) {
2305 al = (al - 0x60) & 0xff;
2306 eflags |= CC_C;
2307 }
2308 EAX = (EAX & ~0xff) | al;
2309 /* well, speed is not an issue here, so we compute the flags by hand */
2310 eflags |= (al == 0) << 6; /* zf */
2311 eflags |= parity_table[al]; /* pf */
2312 eflags |= (al & 0x80); /* sf */
2313 CC_SRC = eflags;
2314}
2315
2316void helper_into(int next_eip_addend)
2317{
2318 int eflags;
2319 eflags = helper_cc_compute_all(CC_OP);
2320 if (eflags & CC_O) {
2321 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2322 }
2323}
2324
2325void helper_cmpxchg8b(target_ulong a0)
2326{
2327 uint64_t d;
2328 int eflags;
2329
2330 eflags = helper_cc_compute_all(CC_OP);
2331 d = ldq(a0);
2332 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2333 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2334 eflags |= CC_Z;
2335 } else {
2336 /* always do the store */
2337 stq(a0, d);
2338 EDX = (uint32_t)(d >> 32);
2339 EAX = (uint32_t)d;
2340 eflags &= ~CC_Z;
2341 }
2342 CC_SRC = eflags;
2343}
2344
2345#ifdef TARGET_X86_64
2346void helper_cmpxchg16b(target_ulong a0)
2347{
2348 uint64_t d0, d1;
2349 int eflags;
2350
2351 if ((a0 & 0xf) != 0)
2352 raise_exception(EXCP0D_GPF);
2353 eflags = helper_cc_compute_all(CC_OP);
2354 d0 = ldq(a0);
2355 d1 = ldq(a0 + 8);
2356 if (d0 == EAX && d1 == EDX) {
2357 stq(a0, EBX);
2358 stq(a0 + 8, ECX);
2359 eflags |= CC_Z;
2360 } else {
2361 /* always do the store */
2362 stq(a0, d0);
2363 stq(a0 + 8, d1);
2364 EDX = d1;
2365 EAX = d0;
2366 eflags &= ~CC_Z;
2367 }
2368 CC_SRC = eflags;
2369}
2370#endif
2371
2372void helper_single_step(void)
2373{
2374#ifndef CONFIG_USER_ONLY
2375 check_hw_breakpoints(env, 1);
2376 env->dr[6] |= DR6_BS;
2377#endif
2378 raise_exception(EXCP01_DB);
2379}
2380
2381void helper_cpuid(void)
2382{
2383 uint32_t eax, ebx, ecx, edx;
2384
2385 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2386
2387 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2388 EAX = eax;
2389 EBX = ebx;
2390 ECX = ecx;
2391 EDX = edx;
2392}
2393
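/* ENTER with a non-zero nesting level: copy level-1 saved frame pointers
   from the old frame, then store the frame temporary t1 below them; the
   translated code performs the final ESP/EBP update itself. Architecturally,
   e.g. ENTER 8,3 pushes two saved frame pointers plus the frame temporary
   before reserving the 8 bytes of locals. */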
2394void helper_enter_level(int level, int data32, target_ulong t1)
2395{
2396 target_ulong ssp;
2397 uint32_t esp_mask, esp, ebp;
2398
2399 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2400 ssp = env->segs[R_SS].base;
2401 ebp = EBP;
2402 esp = ESP;
2403 if (data32) {
2404 /* 32 bit */
2405 esp -= 4;
2406 while (--level) {
2407 esp -= 4;
2408 ebp -= 4;
2409 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2410 }
2411 esp -= 4;
2412 stl(ssp + (esp & esp_mask), t1);
2413 } else {
2414 /* 16 bit */
2415 esp -= 2;
2416 while (--level) {
2417 esp -= 2;
2418 ebp -= 2;
2419 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2420 }
2421 esp -= 2;
2422 stw(ssp + (esp & esp_mask), t1);
2423 }
2424}
2425
2426#ifdef TARGET_X86_64
2427void helper_enter64_level(int level, int data64, target_ulong t1)
2428{
2429 target_ulong esp, ebp;
2430 ebp = EBP;
2431 esp = ESP;
2432
2433 if (data64) {
2434 /* 64 bit */
2435 esp -= 8;
2436 while (--level) {
2437 esp -= 8;
2438 ebp -= 8;
2439 stq(esp, ldq(ebp));
2440 }
2441 esp -= 8;
2442 stq(esp, t1);
2443 } else {
2444 /* 16 bit */
2445 esp -= 2;
2446 while (--level) {
2447 esp -= 2;
2448 ebp -= 2;
2449 stw(esp, lduw(ebp));
2450 }
2451 esp -= 2;
2452 stw(esp, t1);
2453 }
2454}
2455#endif
2456
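/* LLDT: the selector must reference the GDT (TI=0) and denote a present LDT
   descriptor (system type 2); a NULL selector simply marks the LDT invalid.
   In long mode the descriptor is 16 bytes, hence the larger entry_limit. */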
2457void helper_lldt(int selector)
2458{
2459 SegmentCache *dt;
2460 uint32_t e1, e2;
2461#ifndef VBOX
2462 int index, entry_limit;
2463#else
2464 unsigned int index, entry_limit;
2465#endif
2466 target_ulong ptr;
2467
2468#ifdef VBOX
2469 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2470 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2471#endif
2472
2473 selector &= 0xffff;
2474 if ((selector & 0xfffc) == 0) {
2475 /* XXX: NULL selector case: invalid LDT */
2476 env->ldt.base = 0;
2477 env->ldt.limit = 0;
2478#ifdef VBOX
2479 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2480 env->ldt.newselector = 0;
2481#endif
2482 } else {
2483 if (selector & 0x4)
2484 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2485 dt = &env->gdt;
2486 index = selector & ~7;
2487#ifdef TARGET_X86_64
2488 if (env->hflags & HF_LMA_MASK)
2489 entry_limit = 15;
2490 else
2491#endif
2492 entry_limit = 7;
2493 if ((index + entry_limit) > dt->limit)
2494 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2495 ptr = dt->base + index;
2496 e1 = ldl_kernel(ptr);
2497 e2 = ldl_kernel(ptr + 4);
2498 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2499 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2500 if (!(e2 & DESC_P_MASK))
2501 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2502#ifdef TARGET_X86_64
2503 if (env->hflags & HF_LMA_MASK) {
2504 uint32_t e3;
2505 e3 = ldl_kernel(ptr + 8);
2506 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2507 env->ldt.base |= (target_ulong)e3 << 32;
2508 } else
2509#endif
2510 {
2511 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2512 }
2513 }
2514 env->ldt.selector = selector;
2515#ifdef VBOX
2516 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2517 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2518#endif
2519}
2520
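/* LTR: the selector must reference the GDT and denote a present, available
   TSS descriptor (type 1 or 9); the descriptor is then marked busy in place. */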
2521void helper_ltr(int selector)
2522{
2523 SegmentCache *dt;
2524 uint32_t e1, e2;
2525#ifndef VBOX
2526 int index, type, entry_limit;
2527#else
2528 unsigned int index;
2529 int type, entry_limit;
2530#endif
2531 target_ulong ptr;
2532
2533#ifdef VBOX
2534 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2535 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2536 env->tr.flags, (RTSEL)(selector & 0xffff)));
2537#endif
2538 selector &= 0xffff;
2539 if ((selector & 0xfffc) == 0) {
2540 /* NULL selector case: invalid TR */
2541 env->tr.base = 0;
2542 env->tr.limit = 0;
2543 env->tr.flags = 0;
2544#ifdef VBOX
2545 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2546 env->tr.newselector = 0;
2547#endif
2548 } else {
2549 if (selector & 0x4)
2550 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2551 dt = &env->gdt;
2552 index = selector & ~7;
2553#ifdef TARGET_X86_64
2554 if (env->hflags & HF_LMA_MASK)
2555 entry_limit = 15;
2556 else
2557#endif
2558 entry_limit = 7;
2559 if ((index + entry_limit) > dt->limit)
2560 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2561 ptr = dt->base + index;
2562 e1 = ldl_kernel(ptr);
2563 e2 = ldl_kernel(ptr + 4);
2564 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2565 if ((e2 & DESC_S_MASK) ||
2566 (type != 1 && type != 9))
2567 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2568 if (!(e2 & DESC_P_MASK))
2569 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2570#ifdef TARGET_X86_64
2571 if (env->hflags & HF_LMA_MASK) {
2572 uint32_t e3, e4;
2573 e3 = ldl_kernel(ptr + 8);
2574 e4 = ldl_kernel(ptr + 12);
2575 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 load_seg_cache_raw_dt(&env->tr, e1, e2);
2578 env->tr.base |= (target_ulong)e3 << 32;
2579 } else
2580#endif
2581 {
2582 load_seg_cache_raw_dt(&env->tr, e1, e2);
2583 }
2584 e2 |= DESC_TSS_BUSY_MASK;
2585 stl_kernel(ptr + 4, e2);
2586 }
2587 env->tr.selector = selector;
2588#ifdef VBOX
2589 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2590 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2591 env->tr.flags, (RTSEL)(selector & 0xffff)));
2592#endif
2593}
2594
2595/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2596void helper_load_seg(int seg_reg, int selector)
2597{
2598 uint32_t e1, e2;
2599 int cpl, dpl, rpl;
2600 SegmentCache *dt;
2601#ifndef VBOX
2602 int index;
2603#else
2604 unsigned int index;
2605#endif
2606 target_ulong ptr;
2607
2608 selector &= 0xffff;
2609 cpl = env->hflags & HF_CPL_MASK;
2610#ifdef VBOX
2611
2612 /* Trying to load a selector with RPL=1? */

2613 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2614 {
2615 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2616 selector = selector & 0xfffc;
2617 }
2618#endif /* VBOX */
2619 if ((selector & 0xfffc) == 0) {
2620 /* null selector case */
2621 if (seg_reg == R_SS
2622#ifdef TARGET_X86_64
2623 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2624#endif
2625 )
2626 raise_exception_err(EXCP0D_GPF, 0);
2627 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2628 } else {
2629
2630 if (selector & 0x4)
2631 dt = &env->ldt;
2632 else
2633 dt = &env->gdt;
2634 index = selector & ~7;
2635 if ((index + 7) > dt->limit)
2636 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2637 ptr = dt->base + index;
2638 e1 = ldl_kernel(ptr);
2639 e2 = ldl_kernel(ptr + 4);
2640
2641 if (!(e2 & DESC_S_MASK))
2642 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2643 rpl = selector & 3;
2644 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2645 if (seg_reg == R_SS) {
2646 /* must be writable segment */
2647 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2648 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2649 if (rpl != cpl || dpl != cpl)
2650 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2651 } else {
2652 /* must be readable segment */
2653 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2654 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2655
2656 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2657 /* if not conforming code, test rights */
2658 if (dpl < cpl || dpl < rpl)
2659 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2660 }
2661 }
2662
2663 if (!(e2 & DESC_P_MASK)) {
2664 if (seg_reg == R_SS)
2665 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2666 else
2667 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2668 }
2669
2670 /* set the access bit if not already set */
2671 if (!(e2 & DESC_A_MASK)) {
2672 e2 |= DESC_A_MASK;
2673 stl_kernel(ptr + 4, e2);
2674 }
2675
2676 cpu_x86_load_seg_cache(env, seg_reg, selector,
2677 get_seg_base(e1, e2),
2678 get_seg_limit(e1, e2),
2679 e2);
2680#if 0
2681 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2682 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2683#endif
2684 }
2685}
2686
2687/* protected mode jump */
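/* A far jump to a code segment never changes CPL; a jump through a call gate
   redirects to the code segment stored in the gate, while the TSS/task-gate
   types trigger a task switch instead. */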
2688void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2689 int next_eip_addend)
2690{
2691 int gate_cs, type;
2692 uint32_t e1, e2, cpl, dpl, rpl, limit;
2693 target_ulong next_eip;
2694
2695#ifdef VBOX /** @todo Why do we do this? */
2696 e1 = e2 = 0;
2697#endif
2698 if ((new_cs & 0xfffc) == 0)
2699 raise_exception_err(EXCP0D_GPF, 0);
2700 if (load_segment(&e1, &e2, new_cs) != 0)
2701 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2702 cpl = env->hflags & HF_CPL_MASK;
2703 if (e2 & DESC_S_MASK) {
2704 if (!(e2 & DESC_CS_MASK))
2705 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2706 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2707 if (e2 & DESC_C_MASK) {
2708 /* conforming code segment */
2709 if (dpl > cpl)
2710 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2711 } else {
2712 /* non conforming code segment */
2713 rpl = new_cs & 3;
2714 if (rpl > cpl)
2715 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2716 if (dpl != cpl)
2717 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2718 }
2719 if (!(e2 & DESC_P_MASK))
2720 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2721 limit = get_seg_limit(e1, e2);
2722 if (new_eip > limit &&
2723 (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK)))
2724 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2725 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2726 get_seg_base(e1, e2), limit, e2);
2727 EIP = new_eip;
2728 } else {
2729 /* jump to call or task gate */
2730 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2731 rpl = new_cs & 3;
2732 cpl = env->hflags & HF_CPL_MASK;
2733 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2734 switch(type) {
2735 case 1: /* 286 TSS */
2736 case 9: /* 386 TSS */
2737 case 5: /* task gate */
2738 if (dpl < cpl || dpl < rpl)
2739 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2740 next_eip = env->eip + next_eip_addend;
2741 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2742 CC_OP = CC_OP_EFLAGS;
2743 break;
2744 case 4: /* 286 call gate */
2745 case 12: /* 386 call gate */
2746 if ((dpl < cpl) || (dpl < rpl))
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 if (!(e2 & DESC_P_MASK))
2749 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2750 gate_cs = e1 >> 16;
2751 new_eip = (e1 & 0xffff);
2752 if (type == 12)
2753 new_eip |= (e2 & 0xffff0000);
2754 if (load_segment(&e1, &e2, gate_cs) != 0)
2755 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2756 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2757 /* must be code segment */
2758 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2759 (DESC_S_MASK | DESC_CS_MASK)))
2760 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2761 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2762 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2763 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2764 if (!(e2 & DESC_P_MASK))
2765#ifdef VBOX /* See page 3-514 of 253666.pdf */
2766 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2767#else
2768 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2769#endif
2770 limit = get_seg_limit(e1, e2);
2771 if (new_eip > limit)
2772 raise_exception_err(EXCP0D_GPF, 0);
2773 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2774 get_seg_base(e1, e2), limit, e2);
2775 EIP = new_eip;
2776 break;
2777 default:
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 break;
2780 }
2781 }
2782}
2783
2784/* real mode call */
2785void helper_lcall_real(int new_cs, target_ulong new_eip1,
2786 int shift, int next_eip)
2787{
2788 int new_eip;
2789 uint32_t esp, esp_mask;
2790 target_ulong ssp;
2791
2792 new_eip = new_eip1;
2793 esp = ESP;
2794 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2795 ssp = env->segs[R_SS].base;
2796 if (shift) {
2797 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2798 PUSHL(ssp, esp, esp_mask, next_eip);
2799 } else {
2800 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2801 PUSHW(ssp, esp, esp_mask, next_eip);
2802 }
2803
2804 SET_ESP(esp, esp_mask);
2805 env->eip = new_eip;
2806 env->segs[R_CS].selector = new_cs;
2807 env->segs[R_CS].base = (new_cs << 4);
2808}
2809
2810/* protected mode call */
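/* A call through a call gate to a more privileged code segment switches to
   the inner stack taken from the TSS and copies the gate's 5-bit parameter
   count worth of stack words from the caller's stack before pushing the
   return CS:EIP. */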
2811void helper_lcall_protected(int new_cs, target_ulong new_eip,
2812 int shift, int next_eip_addend)
2813{
2814 int new_stack, i;
2815 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2816 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2817 uint32_t val, limit, old_sp_mask;
2818 target_ulong ssp, old_ssp, next_eip;
2819
2820#ifdef VBOX /** @todo Why do we do this? */
2821 e1 = e2 = 0;
2822#endif
2823 next_eip = env->eip + next_eip_addend;
2824 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2825 LOG_PCALL_STATE(env);
2826 if ((new_cs & 0xfffc) == 0)
2827 raise_exception_err(EXCP0D_GPF, 0);
2828 if (load_segment(&e1, &e2, new_cs) != 0)
2829 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2830 cpl = env->hflags & HF_CPL_MASK;
2831 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2832 if (e2 & DESC_S_MASK) {
2833 if (!(e2 & DESC_CS_MASK))
2834 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2835 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2836 if (e2 & DESC_C_MASK) {
2837 /* conforming code segment */
2838 if (dpl > cpl)
2839 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2840 } else {
2841 /* non conforming code segment */
2842 rpl = new_cs & 3;
2843 if (rpl > cpl)
2844 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2845 if (dpl != cpl)
2846 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2847 }
2848 if (!(e2 & DESC_P_MASK))
2849 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2850
2851#ifdef TARGET_X86_64
2852 /* XXX: check 16/32 bit cases in long mode */
2853 if (shift == 2) {
2854 target_ulong rsp;
2855 /* 64 bit case */
2856 rsp = ESP;
2857 PUSHQ(rsp, env->segs[R_CS].selector);
2858 PUSHQ(rsp, next_eip);
2859 /* from this point, not restartable */
2860 ESP = rsp;
2861 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2862 get_seg_base(e1, e2),
2863 get_seg_limit(e1, e2), e2);
2864 EIP = new_eip;
2865 } else
2866#endif
2867 {
2868 sp = ESP;
2869 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2870 ssp = env->segs[R_SS].base;
2871 if (shift) {
2872 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2873 PUSHL(ssp, sp, sp_mask, next_eip);
2874 } else {
2875 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2876 PUSHW(ssp, sp, sp_mask, next_eip);
2877 }
2878
2879 limit = get_seg_limit(e1, e2);
2880 if (new_eip > limit)
2881 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2882 /* from this point, not restartable */
2883 SET_ESP(sp, sp_mask);
2884 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2885 get_seg_base(e1, e2), limit, e2);
2886 EIP = new_eip;
2887 }
2888 } else {
2889 /* check gate type */
2890 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2891 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2892 rpl = new_cs & 3;
2893 switch(type) {
2894 case 1: /* available 286 TSS */
2895 case 9: /* available 386 TSS */
2896 case 5: /* task gate */
2897 if (dpl < cpl || dpl < rpl)
2898 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2899 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2900 CC_OP = CC_OP_EFLAGS;
2901 return;
2902 case 4: /* 286 call gate */
2903 case 12: /* 386 call gate */
2904 break;
2905 default:
2906 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2907 break;
2908 }
2909 shift = type >> 3;
2910
2911 if (dpl < cpl || dpl < rpl)
2912 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2913 /* check valid bit */
2914 if (!(e2 & DESC_P_MASK))
2915 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2916 selector = e1 >> 16;
2917 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2918 param_count = e2 & 0x1f;
2919 if ((selector & 0xfffc) == 0)
2920 raise_exception_err(EXCP0D_GPF, 0);
2921
2922 if (load_segment(&e1, &e2, selector) != 0)
2923 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2924 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2926 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2927 if (dpl > cpl)
2928 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2929 if (!(e2 & DESC_P_MASK))
2930 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2931
2932 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2933 /* to inner privilege */
2934 get_ss_esp_from_tss(&ss, &sp, dpl);
2935 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2936 ss, sp, param_count, ESP);
2937 if ((ss & 0xfffc) == 0)
2938 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2939 if ((ss & 3) != dpl)
2940 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2941 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2942 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2943 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2944 if (ss_dpl != dpl)
2945 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2946 if (!(ss_e2 & DESC_S_MASK) ||
2947 (ss_e2 & DESC_CS_MASK) ||
2948 !(ss_e2 & DESC_W_MASK))
2949 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2950 if (!(ss_e2 & DESC_P_MASK))
2951#ifdef VBOX /* See page 3-99 of 253666.pdf */
2952 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2953#else
2954 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2955#endif
2956
2957 // push_size = ((param_count * 2) + 8) << shift;
2958
2959 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2960 old_ssp = env->segs[R_SS].base;
2961
2962 sp_mask = get_sp_mask(ss_e2);
2963 ssp = get_seg_base(ss_e1, ss_e2);
2964 if (shift) {
2965 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2966 PUSHL(ssp, sp, sp_mask, ESP);
2967 for(i = param_count - 1; i >= 0; i--) {
2968 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2969 PUSHL(ssp, sp, sp_mask, val);
2970 }
2971 } else {
2972 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2973 PUSHW(ssp, sp, sp_mask, ESP);
2974 for(i = param_count - 1; i >= 0; i--) {
2975 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2976 PUSHW(ssp, sp, sp_mask, val);
2977 }
2978 }
2979 new_stack = 1;
2980 } else {
2981 /* to same privilege */
2982 sp = ESP;
2983 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2984 ssp = env->segs[R_SS].base;
2985 // push_size = (4 << shift);
2986 new_stack = 0;
2987 }
2988
2989 if (shift) {
2990 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2991 PUSHL(ssp, sp, sp_mask, next_eip);
2992 } else {
2993 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2994 PUSHW(ssp, sp, sp_mask, next_eip);
2995 }
2996
2997 /* from this point, not restartable */
2998
2999 if (new_stack) {
3000 ss = (ss & ~3) | dpl;
3001 cpu_x86_load_seg_cache(env, R_SS, ss,
3002 ssp,
3003 get_seg_limit(ss_e1, ss_e2),
3004 ss_e2);
3005 }
3006
3007 selector = (selector & ~3) | dpl;
3008 cpu_x86_load_seg_cache(env, R_CS, selector,
3009 get_seg_base(e1, e2),
3010 get_seg_limit(e1, e2),
3011 e2);
3012 cpu_x86_set_cpl(env, dpl);
3013 SET_ESP(sp, sp_mask);
3014 EIP = offset;
3015 }
3016}
3017
3018/* real and vm86 mode iret */
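/* The VBox-specific code below implements the VME extension for IRET in
   virtual-8086 mode with IOPL < 3: IF is virtualised through VIF, and a #GP
   is raised if the popped flags would set TF or enable interrupts while VIP
   is set. */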
3019void helper_iret_real(int shift)
3020{
3021 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3022 target_ulong ssp;
3023 int eflags_mask;
3024#ifdef VBOX
3025 bool fVME = false;
3026
3027 remR3TrapClear(env->pVM);
3028#endif /* VBOX */
3029
3030 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3031 sp = ESP;
3032 ssp = env->segs[R_SS].base;
3033 if (shift == 1) {
3034 /* 32 bits */
3035 POPL(ssp, sp, sp_mask, new_eip);
3036 POPL(ssp, sp, sp_mask, new_cs);
3037 new_cs &= 0xffff;
3038 POPL(ssp, sp, sp_mask, new_eflags);
3039 } else {
3040 /* 16 bits */
3041 POPW(ssp, sp, sp_mask, new_eip);
3042 POPW(ssp, sp, sp_mask, new_cs);
3043 POPW(ssp, sp, sp_mask, new_eflags);
3044 }
3045#ifdef VBOX
3046 if ( (env->eflags & VM_MASK)
3047 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3048 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3049 {
3050 fVME = true;
3051 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3052 /* if TF will be set -> #GP */
3053 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3054 || (new_eflags & TF_MASK))
3055 raise_exception(EXCP0D_GPF);
3056 }
3057#endif /* VBOX */
3058 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3059 env->segs[R_CS].selector = new_cs;
3060 env->segs[R_CS].base = (new_cs << 4);
3061 env->eip = new_eip;
3062#ifdef VBOX
3063 if (fVME)
3064 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3065 else
3066#endif
3067 if (env->eflags & VM_MASK)
3068 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3069 else
3070 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3071 if (shift == 0)
3072 eflags_mask &= 0xffff;
3073 load_eflags(new_eflags, eflags_mask);
3074 env->hflags2 &= ~HF2_NMI_MASK;
3075#ifdef VBOX
3076 if (fVME)
3077 {
3078 if (new_eflags & IF_MASK)
3079 env->eflags |= VIF_MASK;
3080 else
3081 env->eflags &= ~VIF_MASK;
3082 }
3083#endif /* VBOX */
3084}
3085
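/* On a return that lowers the privilege level, data segment registers whose
   DPL is below the new CPL are nulled so less privileged code cannot keep
   using them; a NULL FS/GS selector is left untouched to preserve a possibly
   valid base (see the XXX below). */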
3086static inline void validate_seg(int seg_reg, int cpl)
3087{
3088 int dpl;
3089 uint32_t e2;
3090
3091 /* XXX: on x86_64, we do not want to nullify FS and GS because
3092 they may still contain a valid base. I would be interested to
3093 know how a real x86_64 CPU behaves */
3094 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3095 (env->segs[seg_reg].selector & 0xfffc) == 0)
3096 return;
3097
3098 e2 = env->segs[seg_reg].flags;
3099 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3100 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3101 /* data or non conforming code segment */
3102 if (dpl < cpl) {
3103 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3104 }
3105 }
3106}
3107
3108/* protected mode iret */
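/* Common tail for far RET and IRET in protected mode: shift selects the
   operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit), is_iret additionally
   pops EFLAGS, and addend releases extra stack bytes (RET imm16). */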
3109static inline void helper_ret_protected(int shift, int is_iret, int addend)
3110{
3111 uint32_t new_cs, new_eflags, new_ss;
3112 uint32_t new_es, new_ds, new_fs, new_gs;
3113 uint32_t e1, e2, ss_e1, ss_e2;
3114 int cpl, dpl, rpl, eflags_mask, iopl;
3115 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3116
3117#ifdef VBOX /** @todo Why do we do this? */
3118 ss_e1 = ss_e2 = e1 = e2 = 0;
3119#endif
3120
3121#ifdef TARGET_X86_64
3122 if (shift == 2)
3123 sp_mask = -1;
3124 else
3125#endif
3126 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3127 sp = ESP;
3128 ssp = env->segs[R_SS].base;
3129 new_eflags = 0; /* avoid warning */
3130#ifdef TARGET_X86_64
3131 if (shift == 2) {
3132 POPQ(sp, new_eip);
3133 POPQ(sp, new_cs);
3134 new_cs &= 0xffff;
3135 if (is_iret) {
3136 POPQ(sp, new_eflags);
3137 }
3138 } else
3139#endif
3140 if (shift == 1) {
3141 /* 32 bits */
3142 POPL(ssp, sp, sp_mask, new_eip);
3143 POPL(ssp, sp, sp_mask, new_cs);
3144 new_cs &= 0xffff;
3145 if (is_iret) {
3146 POPL(ssp, sp, sp_mask, new_eflags);
3147#if defined(VBOX) && defined(DEBUG)
3148 printf("iret: new CS %04X\n", new_cs);
3149 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3150 printf("iret: new EFLAGS %08X\n", new_eflags);
3151 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3152#endif
3153 if (new_eflags & VM_MASK)
3154 goto return_to_vm86;
3155 }
3156#ifdef VBOX
3157 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3158 {
3159# ifdef DEBUG
3160 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3161# endif
3162 new_cs = new_cs & 0xfffc;
3163 }
3164#endif
3165 } else {
3166 /* 16 bits */
3167 POPW(ssp, sp, sp_mask, new_eip);
3168 POPW(ssp, sp, sp_mask, new_cs);
3169 if (is_iret)
3170 POPW(ssp, sp, sp_mask, new_eflags);
3171 }
3172 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3173 new_cs, new_eip, shift, addend);
3174 LOG_PCALL_STATE(env);
3175 if ((new_cs & 0xfffc) == 0)
3176 {
3177#if defined(VBOX) && defined(DEBUG)
3178 printf("new_cs & 0xfffc) == 0\n");
3179#endif
3180 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3181 }
3182 if (load_segment(&e1, &e2, new_cs) != 0)
3183 {
3184#if defined(VBOX) && defined(DEBUG)
3185 printf("load_segment failed\n");
3186#endif
3187 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3188 }
3189 if (!(e2 & DESC_S_MASK) ||
3190 !(e2 & DESC_CS_MASK))
3191 {
3192#if defined(VBOX) && defined(DEBUG)
3193 printf("e2 mask %08x\n", e2);
3194#endif
3195 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3196 }
3197 cpl = env->hflags & HF_CPL_MASK;
3198 rpl = new_cs & 3;
3199 if (rpl < cpl)
3200 {
3201#if defined(VBOX) && defined(DEBUG)
3202 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3203#endif
3204 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3205 }
3206 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3207 if (e2 & DESC_C_MASK) {
3208 if (dpl > rpl)
3209 {
3210#if defined(VBOX) && defined(DEBUG)
3211 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3212#endif
3213 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3214 }
3215 } else {
3216 if (dpl != rpl)
3217 {
3218#if defined(VBOX) && defined(DEBUG)
3219 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3220#endif
3221 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3222 }
3223 }
3224 if (!(e2 & DESC_P_MASK))
3225 {
3226#if defined(VBOX) && defined(DEBUG)
3227 printf("DESC_P_MASK e2=%08x\n", e2);
3228#endif
3229 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3230 }
3231
3232 sp += addend;
3233 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3234 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3235 /* return to same privilege level */
3236 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3237 get_seg_base(e1, e2),
3238 get_seg_limit(e1, e2),
3239 e2);
3240 } else {
3241 /* return to different privilege level */
3242#ifdef TARGET_X86_64
3243 if (shift == 2) {
3244 POPQ(sp, new_esp);
3245 POPQ(sp, new_ss);
3246 new_ss &= 0xffff;
3247 } else
3248#endif
3249 if (shift == 1) {
3250 /* 32 bits */
3251 POPL(ssp, sp, sp_mask, new_esp);
3252 POPL(ssp, sp, sp_mask, new_ss);
3253 new_ss &= 0xffff;
3254 } else {
3255 /* 16 bits */
3256 POPW(ssp, sp, sp_mask, new_esp);
3257 POPW(ssp, sp, sp_mask, new_ss);
3258 }
3259 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3260 new_ss, new_esp);
3261 if ((new_ss & 0xfffc) == 0) {
3262#ifdef TARGET_X86_64
3263 /* NULL ss is allowed in long mode if cpl != 3 */
3264 /* XXX: test CS64 ? */
3265 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3266 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3267 0, 0xffffffff,
3268 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3269 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3270 DESC_W_MASK | DESC_A_MASK);
3271 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3272 } else
3273#endif
3274 {
3275 raise_exception_err(EXCP0D_GPF, 0);
3276 }
3277 } else {
3278 if ((new_ss & 3) != rpl)
3279 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3280 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3281 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3282 if (!(ss_e2 & DESC_S_MASK) ||
3283 (ss_e2 & DESC_CS_MASK) ||
3284 !(ss_e2 & DESC_W_MASK))
3285 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3286 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3287 if (dpl != rpl)
3288 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3289 if (!(ss_e2 & DESC_P_MASK))
3290 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3291 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3292 get_seg_base(ss_e1, ss_e2),
3293 get_seg_limit(ss_e1, ss_e2),
3294 ss_e2);
3295 }
3296
3297 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3298 get_seg_base(e1, e2),
3299 get_seg_limit(e1, e2),
3300 e2);
3301 cpu_x86_set_cpl(env, rpl);
3302 sp = new_esp;
3303#ifdef TARGET_X86_64
3304 if (env->hflags & HF_CS64_MASK)
3305 sp_mask = -1;
3306 else
3307#endif
3308 sp_mask = get_sp_mask(ss_e2);
3309
3310 /* validate data segments */
3311 validate_seg(R_ES, rpl);
3312 validate_seg(R_DS, rpl);
3313 validate_seg(R_FS, rpl);
3314 validate_seg(R_GS, rpl);
3315
3316 sp += addend;
3317 }
3318 SET_ESP(sp, sp_mask);
3319 env->eip = new_eip;
3320 if (is_iret) {
3321 /* NOTE: 'cpl' is the _old_ CPL */
3322 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3323 if (cpl == 0)
3324#ifdef VBOX
3325 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3326#else
3327 eflags_mask |= IOPL_MASK;
3328#endif
3329 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3330 if (cpl <= iopl)
3331 eflags_mask |= IF_MASK;
3332 if (shift == 0)
3333 eflags_mask &= 0xffff;
3334 load_eflags(new_eflags, eflags_mask);
3335 }
3336 return;
3337
3338 return_to_vm86:
3339 POPL(ssp, sp, sp_mask, new_esp);
3340 POPL(ssp, sp, sp_mask, new_ss);
3341 POPL(ssp, sp, sp_mask, new_es);
3342 POPL(ssp, sp, sp_mask, new_ds);
3343 POPL(ssp, sp, sp_mask, new_fs);
3344 POPL(ssp, sp, sp_mask, new_gs);
3345
3346 /* modify processor state */
3347 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3348 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3349 load_seg_vm(R_CS, new_cs & 0xffff);
3350 cpu_x86_set_cpl(env, 3);
3351 load_seg_vm(R_SS, new_ss & 0xffff);
3352 load_seg_vm(R_ES, new_es & 0xffff);
3353 load_seg_vm(R_DS, new_ds & 0xffff);
3354 load_seg_vm(R_FS, new_fs & 0xffff);
3355 load_seg_vm(R_GS, new_gs & 0xffff);
3356
3357 env->eip = new_eip & 0xffff;
3358 ESP = new_esp;
3359}
3360
3361void helper_iret_protected(int shift, int next_eip)
3362{
3363 int tss_selector, type;
3364 uint32_t e1, e2;
3365
3366#ifdef VBOX
3367 e1 = e2 = 0; /** @todo Why do we do this? */
3368 remR3TrapClear(env->pVM);
3369#endif
3370
3371 /* specific case for TSS */
3372 if (env->eflags & NT_MASK) {
3373#ifdef TARGET_X86_64
3374 if (env->hflags & HF_LMA_MASK)
3375 raise_exception_err(EXCP0D_GPF, 0);
3376#endif
3377 tss_selector = lduw_kernel(env->tr.base + 0);
3378 if (tss_selector & 4)
3379 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3380 if (load_segment(&e1, &e2, tss_selector) != 0)
3381 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3382 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3383 /* NOTE: we check both segment and busy TSS */
3384 if (type != 3)
3385 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3386 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3387 } else {
3388 helper_ret_protected(shift, 1, 0);
3389 }
3390 env->hflags2 &= ~HF2_NMI_MASK;
3391}
3392
3393void helper_lret_protected(int shift, int addend)
3394{
3395 helper_ret_protected(shift, 0, addend);
3396}
3397
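/* SYSENTER/SYSEXIT use flat segments derived from IA32_SYSENTER_CS:
   SYSENTER enters CPL 0 with CS = sysenter_cs and SS = sysenter_cs + 8,
   loading EIP/ESP from the SYSENTER_EIP/ESP MSRs; SYSEXIT returns to CPL 3
   with EIP = EDX and ESP = ECX. */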
3398void helper_sysenter(void)
3399{
3400 if (env->sysenter_cs == 0) {
3401 raise_exception_err(EXCP0D_GPF, 0);
3402 }
3403 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3404 cpu_x86_set_cpl(env, 0);
3405
3406#ifdef TARGET_X86_64
3407 if (env->hflags & HF_LMA_MASK) {
3408 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3409 0, 0xffffffff,
3410 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3411 DESC_S_MASK |
3412 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3413 } else
3414#endif
3415 {
3416 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3417 0, 0xffffffff,
3418 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3419 DESC_S_MASK |
3420 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3421 }
3422 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3423 0, 0xffffffff,
3424 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3425 DESC_S_MASK |
3426 DESC_W_MASK | DESC_A_MASK);
3427 ESP = env->sysenter_esp;
3428 EIP = env->sysenter_eip;
3429}
3430
3431void helper_sysexit(int dflag)
3432{
3433 int cpl;
3434
3435 cpl = env->hflags & HF_CPL_MASK;
3436 if (env->sysenter_cs == 0 || cpl != 0) {
3437 raise_exception_err(EXCP0D_GPF, 0);
3438 }
3439 cpu_x86_set_cpl(env, 3);
3440#ifdef TARGET_X86_64
3441 if (dflag == 2) {
3442 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3443 0, 0xffffffff,
3444 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3445 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3446 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3447 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3448 0, 0xffffffff,
3449 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3450 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3451 DESC_W_MASK | DESC_A_MASK);
3452 } else
3453#endif
3454 {
3455 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3456 0, 0xffffffff,
3457 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3458 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3459 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3460 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3461 0, 0xffffffff,
3462 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3463 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3464 DESC_W_MASK | DESC_A_MASK);
3465 }
3466 ESP = ECX;
3467 EIP = EDX;
3468}
3469
3470#if defined(CONFIG_USER_ONLY)
3471target_ulong helper_read_crN(int reg)
3472{
3473 return 0;
3474}
3475
3476void helper_write_crN(int reg, target_ulong t0)
3477{
3478}
3479
3480void helper_movl_drN_T0(int reg, target_ulong t0)
3481{
3482}
3483#else
3484target_ulong helper_read_crN(int reg)
3485{
3486 target_ulong val;
3487
3488 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3489 switch(reg) {
3490 default:
3491 val = env->cr[reg];
3492 break;
3493 case 8:
3494 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3495#ifndef VBOX
3496 val = cpu_get_apic_tpr(env->apic_state);
3497#else /* VBOX */
3498 val = cpu_get_apic_tpr(env);
3499#endif /* VBOX */
3500 } else {
3501 val = env->v_tpr;
3502 }
3503 break;
3504 }
3505 return val;
3506}
3507
3508void helper_write_crN(int reg, target_ulong t0)
3509{
3510 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3511 switch(reg) {
3512 case 0:
3513 cpu_x86_update_cr0(env, t0);
3514 break;
3515 case 3:
3516 cpu_x86_update_cr3(env, t0);
3517 break;
3518 case 4:
3519 cpu_x86_update_cr4(env, t0);
3520 break;
3521 case 8:
3522 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3523#ifndef VBOX
3524 cpu_set_apic_tpr(env->apic_state, t0);
3525#else /* VBOX */
3526 cpu_set_apic_tpr(env, t0);
3527#endif /* VBOX */
3528 }
3529 env->v_tpr = t0 & 0x0f;
3530 break;
3531 default:
3532 env->cr[reg] = t0;
3533 break;
3534 }
3535}
3536
3537void helper_movl_drN_T0(int reg, target_ulong t0)
3538{
3539 int i;
3540
3541 if (reg < 4) {
3542 hw_breakpoint_remove(env, reg);
3543 env->dr[reg] = t0;
3544 hw_breakpoint_insert(env, reg);
3545 } else if (reg == 7) {
3546 for (i = 0; i < 4; i++)
3547 hw_breakpoint_remove(env, i);
3548 env->dr[7] = t0;
3549 for (i = 0; i < 4; i++)
3550 hw_breakpoint_insert(env, i);
3551 } else
3552 env->dr[reg] = t0;
3553}
3554#endif
3555
3556void helper_lmsw(target_ulong t0)
3557{
3558 /* LMSW modifies only the 4 low bits of CR0; it can set PE but cannot
3559 clear it once it has been set. */
3560 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3561 helper_write_crN(0, t0);
3562}
3563
3564void helper_clts(void)
3565{
3566 env->cr[0] &= ~CR0_TS_MASK;
3567 env->hflags &= ~HF_TS_MASK;
3568}
3569
3570void helper_invlpg(target_ulong addr)
3571{
3572 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3573 tlb_flush_page(env, addr);
3574}
3575
3576void helper_rdtsc(void)
3577{
3578 uint64_t val;
3579
3580 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3581 raise_exception(EXCP0D_GPF);
3582 }
3583 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3584
3585 val = cpu_get_tsc(env) + env->tsc_offset;
3586 EAX = (uint32_t)(val);
3587 EDX = (uint32_t)(val >> 32);
3588}
3589
3590void helper_rdtscp(void)
3591{
3592 helper_rdtsc();
3593#ifndef VBOX
3594 ECX = (uint32_t)(env->tsc_aux);
3595#else /* VBOX */
3596 uint64_t val;
3597 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3598 ECX = (uint32_t)(val);
3599 else
3600 ECX = 0;
3601#endif /* VBOX */
3602}
3603
3604void helper_rdpmc(void)
3605{
3606#ifdef VBOX
3607 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3608 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3609 raise_exception(EXCP0D_GPF);
3610 }
3611 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3612 EAX = 0;
3613 EDX = 0;
3614#else /* !VBOX */
3615 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3616 raise_exception(EXCP0D_GPF);
3617 }
3618 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3619
3620 /* currently unimplemented */
3621 raise_exception_err(EXCP06_ILLOP, 0);
3622#endif /* !VBOX */
3623}
3624
3625#if defined(CONFIG_USER_ONLY)
3626void helper_wrmsr(void)
3627{
3628}
3629
3630void helper_rdmsr(void)
3631{
3632}
3633#else
3634void helper_wrmsr(void)
3635{
3636 uint64_t val;
3637
3638 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3639
3640 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3641
3642 switch((uint32_t)ECX) {
3643 case MSR_IA32_SYSENTER_CS:
3644 env->sysenter_cs = val & 0xffff;
3645 break;
3646 case MSR_IA32_SYSENTER_ESP:
3647 env->sysenter_esp = val;
3648 break;
3649 case MSR_IA32_SYSENTER_EIP:
3650 env->sysenter_eip = val;
3651 break;
3652 case MSR_IA32_APICBASE:
3653# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3654 cpu_set_apic_base(env->apic_state, val);
3655# endif
3656 break;
3657 case MSR_EFER:
3658 {
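            /* Only EFER bits whose controlling CPUID feature is exposed to
               the guest are writable; all other bits keep their old value. */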
3659 uint64_t update_mask;
3660 update_mask = 0;
3661 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3662 update_mask |= MSR_EFER_SCE;
3663 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3664 update_mask |= MSR_EFER_LME;
3665 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3666 update_mask |= MSR_EFER_FFXSR;
3667 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3668 update_mask |= MSR_EFER_NXE;
3669 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3670 update_mask |= MSR_EFER_SVME;
3673 cpu_load_efer(env, (env->efer & ~update_mask) |
3674 (val & update_mask));
3675 }
3676 break;
3677 case MSR_STAR:
3678 env->star = val;
3679 break;
3680 case MSR_PAT:
3681 env->pat = val;
3682 break;
3683 case MSR_VM_HSAVE_PA:
3684 env->vm_hsave = val;
3685 break;
3686#ifdef TARGET_X86_64
3687 case MSR_LSTAR:
3688 env->lstar = val;
3689 break;
3690 case MSR_CSTAR:
3691 env->cstar = val;
3692 break;
3693 case MSR_FMASK:
3694 env->fmask = val;
3695 break;
3696 case MSR_FSBASE:
3697 env->segs[R_FS].base = val;
3698 break;
3699 case MSR_GSBASE:
3700 env->segs[R_GS].base = val;
3701 break;
3702 case MSR_KERNELGSBASE:
3703 env->kernelgsbase = val;
3704 break;
3705#endif
3706# ifndef VBOX
3707 case MSR_MTRRphysBase(0):
3708 case MSR_MTRRphysBase(1):
3709 case MSR_MTRRphysBase(2):
3710 case MSR_MTRRphysBase(3):
3711 case MSR_MTRRphysBase(4):
3712 case MSR_MTRRphysBase(5):
3713 case MSR_MTRRphysBase(6):
3714 case MSR_MTRRphysBase(7):
3715 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3716 break;
3717 case MSR_MTRRphysMask(0):
3718 case MSR_MTRRphysMask(1):
3719 case MSR_MTRRphysMask(2):
3720 case MSR_MTRRphysMask(3):
3721 case MSR_MTRRphysMask(4):
3722 case MSR_MTRRphysMask(5):
3723 case MSR_MTRRphysMask(6):
3724 case MSR_MTRRphysMask(7):
3725 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3726 break;
3727 case MSR_MTRRfix64K_00000:
3728 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3729 break;
3730 case MSR_MTRRfix16K_80000:
3731 case MSR_MTRRfix16K_A0000:
3732 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3733 break;
3734 case MSR_MTRRfix4K_C0000:
3735 case MSR_MTRRfix4K_C8000:
3736 case MSR_MTRRfix4K_D0000:
3737 case MSR_MTRRfix4K_D8000:
3738 case MSR_MTRRfix4K_E0000:
3739 case MSR_MTRRfix4K_E8000:
3740 case MSR_MTRRfix4K_F0000:
3741 case MSR_MTRRfix4K_F8000:
3742 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3743 break;
3744 case MSR_MTRRdefType:
3745 env->mtrr_deftype = val;
3746 break;
3747 case MSR_MCG_STATUS:
3748 env->mcg_status = val;
3749 break;
3750 case MSR_MCG_CTL:
3751 if ((env->mcg_cap & MCG_CTL_P)
3752 && (val == 0 || val == ~(uint64_t)0))
3753 env->mcg_ctl = val;
3754 break;
3755 case MSR_TSC_AUX:
3756 env->tsc_aux = val;
3757 break;
3758# endif /* !VBOX */
3759 default:
3760# ifndef VBOX
3761 if ((uint32_t)ECX >= MSR_MC0_CTL
3762 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3763 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3764 if ((offset & 0x3) != 0
3765 || (val == 0 || val == ~(uint64_t)0))
3766 env->mce_banks[offset] = val;
3767 break;
3768 }
3769 /* XXX: exception ? */
3770# endif
3771 break;
3772 }
3773
3774# ifdef VBOX
3775 /* call CPUM. */
3776 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3777 {
3778 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3779 }
3780# endif
3781}
3782
3783void helper_rdmsr(void)
3784{
3785 uint64_t val;
3786
3787 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3788
3789 switch((uint32_t)ECX) {
3790 case MSR_IA32_SYSENTER_CS:
3791 val = env->sysenter_cs;
3792 break;
3793 case MSR_IA32_SYSENTER_ESP:
3794 val = env->sysenter_esp;
3795 break;
3796 case MSR_IA32_SYSENTER_EIP:
3797 val = env->sysenter_eip;
3798 break;
3799 case MSR_IA32_APICBASE:
3800#ifndef VBOX
3801 val = cpu_get_apic_base(env->apic_state);
3802#else /* VBOX */
3803 val = cpu_get_apic_base(env);
3804#endif /* VBOX */
3805 break;
3806 case MSR_EFER:
3807 val = env->efer;
3808 break;
3809 case MSR_STAR:
3810 val = env->star;
3811 break;
3812 case MSR_PAT:
3813 val = env->pat;
3814 break;
3815 case MSR_VM_HSAVE_PA:
3816 val = env->vm_hsave;
3817 break;
3818# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3819 case MSR_IA32_PERF_STATUS:
3820 /* tsc_increment_by_tick */
3821 val = 1000ULL;
3822 /* CPU multiplier */
3823 val |= (((uint64_t)4ULL) << 40);
3824 break;
3825# endif /* !VBOX */
3826#ifdef TARGET_X86_64
3827 case MSR_LSTAR:
3828 val = env->lstar;
3829 break;
3830 case MSR_CSTAR:
3831 val = env->cstar;
3832 break;
3833 case MSR_FMASK:
3834 val = env->fmask;
3835 break;
3836 case MSR_FSBASE:
3837 val = env->segs[R_FS].base;
3838 break;
3839 case MSR_GSBASE:
3840 val = env->segs[R_GS].base;
3841 break;
3842 case MSR_KERNELGSBASE:
3843 val = env->kernelgsbase;
3844 break;
3845# ifndef VBOX
3846 case MSR_TSC_AUX:
3847 val = env->tsc_aux;
3848 break;
3849# endif /*!VBOX*/
3850#endif
3851# ifndef VBOX
3852 case MSR_MTRRphysBase(0):
3853 case MSR_MTRRphysBase(1):
3854 case MSR_MTRRphysBase(2):
3855 case MSR_MTRRphysBase(3):
3856 case MSR_MTRRphysBase(4):
3857 case MSR_MTRRphysBase(5):
3858 case MSR_MTRRphysBase(6):
3859 case MSR_MTRRphysBase(7):
3860 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3861 break;
3862 case MSR_MTRRphysMask(0):
3863 case MSR_MTRRphysMask(1):
3864 case MSR_MTRRphysMask(2):
3865 case MSR_MTRRphysMask(3):
3866 case MSR_MTRRphysMask(4):
3867 case MSR_MTRRphysMask(5):
3868 case MSR_MTRRphysMask(6):
3869 case MSR_MTRRphysMask(7):
3870 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3871 break;
3872 case MSR_MTRRfix64K_00000:
3873 val = env->mtrr_fixed[0];
3874 break;
3875 case MSR_MTRRfix16K_80000:
3876 case MSR_MTRRfix16K_A0000:
3877 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3878 break;
3879 case MSR_MTRRfix4K_C0000:
3880 case MSR_MTRRfix4K_C8000:
3881 case MSR_MTRRfix4K_D0000:
3882 case MSR_MTRRfix4K_D8000:
3883 case MSR_MTRRfix4K_E0000:
3884 case MSR_MTRRfix4K_E8000:
3885 case MSR_MTRRfix4K_F0000:
3886 case MSR_MTRRfix4K_F8000:
3887 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3888 break;
3889 case MSR_MTRRdefType:
3890 val = env->mtrr_deftype;
3891 break;
3892 case MSR_MTRRcap:
3893 if (env->cpuid_features & CPUID_MTRR)
3894 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3895 else
3896 /* XXX: exception ? */
3897 val = 0;
3898 break;
3899 case MSR_MCG_CAP:
3900 val = env->mcg_cap;
3901 break;
3902 case MSR_MCG_CTL:
3903 if (env->mcg_cap & MCG_CTL_P)
3904 val = env->mcg_ctl;
3905 else
3906 val = 0;
3907 break;
3908 case MSR_MCG_STATUS:
3909 val = env->mcg_status;
3910 break;
3911# endif /* !VBOX */
3912 default:
3913# ifndef VBOX
3914 if ((uint32_t)ECX >= MSR_MC0_CTL
3915 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3916 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3917 val = env->mce_banks[offset];
3918 break;
3919 }
3920 /* XXX: exception ? */
3921 val = 0;
3922# else /* VBOX */
3923 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3924 {
3925 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3926 val = 0;
3927 }
3928# endif /* VBOX */
3929 break;
3930 }
3931 EAX = (uint32_t)(val);
3932 EDX = (uint32_t)(val >> 32);
3933
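    /* In strict builds, cross-check the value just computed against CPUM's
       view of this MSR; the two are expected to agree. */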
3934# ifdef VBOX_STRICT
3935 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3936 val = 0;
3937 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3938# endif
3939}
3940#endif
3941
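/* LSL: if the selector passes the descriptor-type and privilege checks below,
   load the segment limit (byte or page granular, see get_seg_limit) and set
   ZF; otherwise clear ZF and return 0.  ZF is delivered via CC_SRC. */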
3942target_ulong helper_lsl(target_ulong selector1)
3943{
3944 unsigned int limit;
3945 uint32_t e1, e2, eflags, selector;
3946 int rpl, dpl, cpl, type;
3947
3948 selector = selector1 & 0xffff;
3949 eflags = helper_cc_compute_all(CC_OP);
3950 if ((selector & 0xfffc) == 0)
3951 goto fail;
3952 if (load_segment(&e1, &e2, selector) != 0)
3953 goto fail;
3954 rpl = selector & 3;
3955 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3956 cpl = env->hflags & HF_CPL_MASK;
3957 if (e2 & DESC_S_MASK) {
3958 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3959 /* conforming */
3960 } else {
3961 if (dpl < cpl || dpl < rpl)
3962 goto fail;
3963 }
3964 } else {
3965 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3966 switch(type) {
3967 case 1:
3968 case 2:
3969 case 3:
3970 case 9:
3971 case 11:
3972 break;
3973 default:
3974 goto fail;
3975 }
3976 if (dpl < cpl || dpl < rpl) {
3977 fail:
3978 CC_SRC = eflags & ~CC_Z;
3979 return 0;
3980 }
3981 }
3982 limit = get_seg_limit(e1, e2);
3983 CC_SRC = eflags | CC_Z;
3984 return limit;
3985}
3986
3987target_ulong helper_lar(target_ulong selector1)
3988{
3989 uint32_t e1, e2, eflags, selector;
3990 int rpl, dpl, cpl, type;
3991
3992 selector = selector1 & 0xffff;
3993 eflags = helper_cc_compute_all(CC_OP);
3994 if ((selector & 0xfffc) == 0)
3995 goto fail;
3996 if (load_segment(&e1, &e2, selector) != 0)
3997 goto fail;
3998 rpl = selector & 3;
3999 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4000 cpl = env->hflags & HF_CPL_MASK;
4001 if (e2 & DESC_S_MASK) {
4002 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4003 /* conforming */
4004 } else {
4005 if (dpl < cpl || dpl < rpl)
4006 goto fail;
4007 }
4008 } else {
4009 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4010 switch(type) {
4011 case 1:
4012 case 2:
4013 case 3:
4014 case 4:
4015 case 5:
4016 case 9:
4017 case 11:
4018 case 12:
4019 break;
4020 default:
4021 goto fail;
4022 }
4023 if (dpl < cpl || dpl < rpl) {
4024 fail:
4025 CC_SRC = eflags & ~CC_Z;
4026 return 0;
4027 }
4028 }
4029 CC_SRC = eflags | CC_Z;
4030 return e2 & 0x00f0ff00;
4031}
4032
4033void helper_verr(target_ulong selector1)
4034{
4035 uint32_t e1, e2, eflags, selector;
4036 int rpl, dpl, cpl;
4037
4038 selector = selector1 & 0xffff;
4039 eflags = helper_cc_compute_all(CC_OP);
4040 if ((selector & 0xfffc) == 0)
4041 goto fail;
4042 if (load_segment(&e1, &e2, selector) != 0)
4043 goto fail;
4044 if (!(e2 & DESC_S_MASK))
4045 goto fail;
4046 rpl = selector & 3;
4047 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4048 cpl = env->hflags & HF_CPL_MASK;
4049 if (e2 & DESC_CS_MASK) {
4050 if (!(e2 & DESC_R_MASK))
4051 goto fail;
4052 if (!(e2 & DESC_C_MASK)) {
4053 if (dpl < cpl || dpl < rpl)
4054 goto fail;
4055 }
4056 } else {
4057 if (dpl < cpl || dpl < rpl) {
4058 fail:
4059 CC_SRC = eflags & ~CC_Z;
4060 return;
4061 }
4062 }
4063 CC_SRC = eflags | CC_Z;
4064}
4065
4066void helper_verw(target_ulong selector1)
4067{
4068 uint32_t e1, e2, eflags, selector;
4069 int rpl, dpl, cpl;
4070
4071 selector = selector1 & 0xffff;
4072 eflags = helper_cc_compute_all(CC_OP);
4073 if ((selector & 0xfffc) == 0)
4074 goto fail;
4075 if (load_segment(&e1, &e2, selector) != 0)
4076 goto fail;
4077 if (!(e2 & DESC_S_MASK))
4078 goto fail;
4079 rpl = selector & 3;
4080 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4081 cpl = env->hflags & HF_CPL_MASK;
4082 if (e2 & DESC_CS_MASK) {
4083 goto fail;
4084 } else {
4085 if (dpl < cpl || dpl < rpl)
4086 goto fail;
4087 if (!(e2 & DESC_W_MASK)) {
4088 fail:
4089 CC_SRC = eflags & ~CC_Z;
4090 return;
4091 }
4092 }
4093 CC_SRC = eflags | CC_Z;
4094}
4095
4096/* x87 FPU helpers */
4097
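/* Merge the given exception flags into the FPU status word; if any set
   exception is unmasked in the control word, also set the summary (ES) and
   busy (B) bits so that the next FWAIT/FP instruction will fault. */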
4098static void fpu_set_exception(int mask)
4099{
4100 env->fpus |= mask;
4101 if (env->fpus & (~env->fpuc & FPUC_EM))
4102 env->fpus |= FPUS_SE | FPUS_B;
4103}
4104
4105static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4106{
4107 if (b == 0.0)
4108 fpu_set_exception(FPUS_ZE);
4109 return a / b;
4110}
4111
4112static void fpu_raise_exception(void)
4113{
4114 if (env->cr[0] & CR0_NE_MASK) {
4115 raise_exception(EXCP10_COPR);
4116 }
4117#if !defined(CONFIG_USER_ONLY)
4118 else {
4119 cpu_set_ferr(env);
4120 }
4121#endif
4122}
4123
4124void helper_flds_FT0(uint32_t val)
4125{
4126 union {
4127 float32 f;
4128 uint32_t i;
4129 } u;
4130 u.i = val;
4131 FT0 = float32_to_floatx(u.f, &env->fp_status);
4132}
4133
4134void helper_fldl_FT0(uint64_t val)
4135{
4136 union {
4137 float64 f;
4138 uint64_t i;
4139 } u;
4140 u.i = val;
4141 FT0 = float64_to_floatx(u.f, &env->fp_status);
4142}
4143
4144void helper_fildl_FT0(int32_t val)
4145{
4146 FT0 = int32_to_floatx(val, &env->fp_status);
4147}
4148
4149void helper_flds_ST0(uint32_t val)
4150{
4151 int new_fpstt;
4152 union {
4153 float32 f;
4154 uint32_t i;
4155 } u;
4156 new_fpstt = (env->fpstt - 1) & 7;
4157 u.i = val;
4158 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4159 env->fpstt = new_fpstt;
4160 env->fptags[new_fpstt] = 0; /* validate stack entry */
4161}
4162
4163void helper_fldl_ST0(uint64_t val)
4164{
4165 int new_fpstt;
4166 union {
4167 float64 f;
4168 uint64_t i;
4169 } u;
4170 new_fpstt = (env->fpstt - 1) & 7;
4171 u.i = val;
4172 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4173 env->fpstt = new_fpstt;
4174 env->fptags[new_fpstt] = 0; /* validate stack entry */
4175}
4176
4177void helper_fildl_ST0(int32_t val)
4178{
4179 int new_fpstt;
4180 new_fpstt = (env->fpstt - 1) & 7;
4181 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4182 env->fpstt = new_fpstt;
4183 env->fptags[new_fpstt] = 0; /* validate stack entry */
4184}
4185
4186void helper_fildll_ST0(int64_t val)
4187{
4188 int new_fpstt;
4189 new_fpstt = (env->fpstt - 1) & 7;
4190 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4191 env->fpstt = new_fpstt;
4192 env->fptags[new_fpstt] = 0; /* validate stack entry */
4193}
4194
4195#ifndef VBOX
4196uint32_t helper_fsts_ST0(void)
4197#else
4198RTCCUINTREG helper_fsts_ST0(void)
4199#endif
4200{
4201 union {
4202 float32 f;
4203 uint32_t i;
4204 } u;
4205 u.f = floatx_to_float32(ST0, &env->fp_status);
4206 return u.i;
4207}
4208
4209uint64_t helper_fstl_ST0(void)
4210{
4211 union {
4212 float64 f;
4213 uint64_t i;
4214 } u;
4215 u.f = floatx_to_float64(ST0, &env->fp_status);
4216 return u.i;
4217}
4218
4219#ifndef VBOX
4220int32_t helper_fist_ST0(void)
4221#else
4222RTCCINTREG helper_fist_ST0(void)
4223#endif
4224{
4225 int32_t val;
4226 val = floatx_to_int32(ST0, &env->fp_status);
4227 if (val != (int16_t)val)
4228 val = -32768;
4229 return val;
4230}
4231
4232#ifndef VBOX
4233int32_t helper_fistl_ST0(void)
4234#else
4235RTCCINTREG helper_fistl_ST0(void)
4236#endif
4237{
4238 int32_t val;
4239 val = floatx_to_int32(ST0, &env->fp_status);
4240 return val;
4241}
4242
4243int64_t helper_fistll_ST0(void)
4244{
4245 int64_t val;
4246 val = floatx_to_int64(ST0, &env->fp_status);
4247 return val;
4248}
4249
4250#ifndef VBOX
4251int32_t helper_fistt_ST0(void)
4252#else
4253RTCCINTREG helper_fistt_ST0(void)
4254#endif
4255{
4256 int32_t val;
4257 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4258 if (val != (int16_t)val)
4259 val = -32768;
4260 return val;
4261}
4262
4263#ifndef VBOX
4264int32_t helper_fisttl_ST0(void)
4265#else
4266RTCCINTREG helper_fisttl_ST0(void)
4267#endif
4268{
4269 int32_t val;
4270 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4271 return val;
4272}
4273
4274int64_t helper_fisttll_ST0(void)
4275{
4276 int64_t val;
4277 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4278 return val;
4279}
4280
4281void helper_fldt_ST0(target_ulong ptr)
4282{
4283 int new_fpstt;
4284 new_fpstt = (env->fpstt - 1) & 7;
4285 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4286 env->fpstt = new_fpstt;
4287 env->fptags[new_fpstt] = 0; /* validate stack entry */
4288}
4289
4290void helper_fstt_ST0(target_ulong ptr)
4291{
4292 helper_fstt(ST0, ptr);
4293}
4294
4295void helper_fpush(void)
4296{
4297 fpush();
4298}
4299
4300void helper_fpop(void)
4301{
4302 fpop();
4303}
4304
4305void helper_fdecstp(void)
4306{
4307 env->fpstt = (env->fpstt - 1) & 7;
4308 env->fpus &= (~0x4700);
4309}
4310
4311void helper_fincstp(void)
4312{
4313 env->fpstt = (env->fpstt + 1) & 7;
4314 env->fpus &= (~0x4700);
4315}
4316
4317/* FPU move */
4318
4319void helper_ffree_STN(int st_index)
4320{
4321 env->fptags[(env->fpstt + st_index) & 7] = 1;
4322}
4323
4324void helper_fmov_ST0_FT0(void)
4325{
4326 ST0 = FT0;
4327}
4328
4329void helper_fmov_FT0_STN(int st_index)
4330{
4331 FT0 = ST(st_index);
4332}
4333
4334void helper_fmov_ST0_STN(int st_index)
4335{
4336 ST0 = ST(st_index);
4337}
4338
4339void helper_fmov_STN_ST0(int st_index)
4340{
4341 ST(st_index) = ST0;
4342}
4343
4344void helper_fxchg_ST0_STN(int st_index)
4345{
4346 CPU86_LDouble tmp;
4347 tmp = ST(st_index);
4348 ST(st_index) = ST0;
4349 ST0 = tmp;
4350}
4351
4352/* FPU operations */
4353
4354static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4355
4356void helper_fcom_ST0_FT0(void)
4357{
4358 int ret;
4359
4360 ret = floatx_compare(ST0, FT0, &env->fp_status);
4361 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4362}
4363
4364void helper_fucom_ST0_FT0(void)
4365{
4366 int ret;
4367
4368 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4369 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
4370}
4371
4372static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4373
4374void helper_fcomi_ST0_FT0(void)
4375{
4376 int eflags;
4377 int ret;
4378
4379 ret = floatx_compare(ST0, FT0, &env->fp_status);
4380 eflags = helper_cc_compute_all(CC_OP);
4381 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4382 CC_SRC = eflags;
4383}
4384
4385void helper_fucomi_ST0_FT0(void)
4386{
4387 int eflags;
4388 int ret;
4389
4390 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4391 eflags = helper_cc_compute_all(CC_OP);
4392 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4393 CC_SRC = eflags;
4394}
4395
4396void helper_fadd_ST0_FT0(void)
4397{
4398 ST0 += FT0;
4399}
4400
4401void helper_fmul_ST0_FT0(void)
4402{
4403 ST0 *= FT0;
4404}
4405
4406void helper_fsub_ST0_FT0(void)
4407{
4408 ST0 -= FT0;
4409}
4410
4411void helper_fsubr_ST0_FT0(void)
4412{
4413 ST0 = FT0 - ST0;
4414}
4415
4416void helper_fdiv_ST0_FT0(void)
4417{
4418 ST0 = helper_fdiv(ST0, FT0);
4419}
4420
4421void helper_fdivr_ST0_FT0(void)
4422{
4423 ST0 = helper_fdiv(FT0, ST0);
4424}
4425
4426/* fp operations between STN and ST0 */
4427
4428void helper_fadd_STN_ST0(int st_index)
4429{
4430 ST(st_index) += ST0;
4431}
4432
4433void helper_fmul_STN_ST0(int st_index)
4434{
4435 ST(st_index) *= ST0;
4436}
4437
4438void helper_fsub_STN_ST0(int st_index)
4439{
4440 ST(st_index) -= ST0;
4441}
4442
4443void helper_fsubr_STN_ST0(int st_index)
4444{
4445 CPU86_LDouble *p;
4446 p = &ST(st_index);
4447 *p = ST0 - *p;
4448}
4449
4450void helper_fdiv_STN_ST0(int st_index)
4451{
4452 CPU86_LDouble *p;
4453 p = &ST(st_index);
4454 *p = helper_fdiv(*p, ST0);
4455}
4456
4457void helper_fdivr_STN_ST0(int st_index)
4458{
4459 CPU86_LDouble *p;
4460 p = &ST(st_index);
4461 *p = helper_fdiv(ST0, *p);
4462}
4463
4464/* misc FPU operations */
4465void helper_fchs_ST0(void)
4466{
4467 ST0 = floatx_chs(ST0);
4468}
4469
4470void helper_fabs_ST0(void)
4471{
4472 ST0 = floatx_abs(ST0);
4473}
4474
4475void helper_fld1_ST0(void)
4476{
4477 ST0 = f15rk[1];
4478}
4479
4480void helper_fldl2t_ST0(void)
4481{
4482 ST0 = f15rk[6];
4483}
4484
4485void helper_fldl2e_ST0(void)
4486{
4487 ST0 = f15rk[5];
4488}
4489
4490void helper_fldpi_ST0(void)
4491{
4492 ST0 = f15rk[2];
4493}
4494
4495void helper_fldlg2_ST0(void)
4496{
4497 ST0 = f15rk[3];
4498}
4499
4500void helper_fldln2_ST0(void)
4501{
4502 ST0 = f15rk[4];
4503}
4504
4505void helper_fldz_ST0(void)
4506{
4507 ST0 = f15rk[0];
4508}
4509
4510void helper_fldz_FT0(void)
4511{
4512 FT0 = f15rk[0];
4513}
4514
4515#ifndef VBOX
4516uint32_t helper_fnstsw(void)
4517#else
4518RTCCUINTREG helper_fnstsw(void)
4519#endif
4520{
4521 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4522}
4523
4524#ifndef VBOX
4525uint32_t helper_fnstcw(void)
4526#else
4527RTCCUINTREG helper_fnstcw(void)
4528#endif
4529{
4530 return env->fpuc;
4531}
4532
4533static void update_fp_status(void)
4534{
4535 int rnd_type;
4536
4537 /* set rounding mode */
4538 switch(env->fpuc & RC_MASK) {
4539 default:
4540 case RC_NEAR:
4541 rnd_type = float_round_nearest_even;
4542 break;
4543 case RC_DOWN:
4544 rnd_type = float_round_down;
4545 break;
4546 case RC_UP:
4547 rnd_type = float_round_up;
4548 break;
4549 case RC_CHOP:
4550 rnd_type = float_round_to_zero;
4551 break;
4552 }
4553 set_float_rounding_mode(rnd_type, &env->fp_status);
4554#ifdef FLOATX80
4555 switch((env->fpuc >> 8) & 3) {
4556 case 0:
4557 rnd_type = 32;
4558 break;
4559 case 2:
4560 rnd_type = 64;
4561 break;
4562 case 3:
4563 default:
4564 rnd_type = 80;
4565 break;
4566 }
4567 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4568#endif
4569}
4570
4571void helper_fldcw(uint32_t val)
4572{
4573 env->fpuc = val;
4574 update_fp_status();
4575}
4576
4577void helper_fclex(void)
4578{
4579 env->fpus &= 0x7f00;
4580}
4581
4582void helper_fwait(void)
4583{
4584 if (env->fpus & FPUS_SE)
4585 fpu_raise_exception();
4586}
4587
4588void helper_fninit(void)
4589{
4590 env->fpus = 0;
4591 env->fpstt = 0;
4592 env->fpuc = 0x37f;
4593 env->fptags[0] = 1;
4594 env->fptags[1] = 1;
4595 env->fptags[2] = 1;
4596 env->fptags[3] = 1;
4597 env->fptags[4] = 1;
4598 env->fptags[5] = 1;
4599 env->fptags[6] = 1;
4600 env->fptags[7] = 1;
4601}
4602
4603/* BCD ops */
4604
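/* FBLD/FBSTP use an 80-bit packed BCD format: bytes 0..8 hold 18 BCD digits
   (two per byte, least significant byte first) and bit 7 of byte 9 is the
   sign. */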
4605void helper_fbld_ST0(target_ulong ptr)
4606{
4607 CPU86_LDouble tmp;
4608 uint64_t val;
4609 unsigned int v;
4610 int i;
4611
4612 val = 0;
4613 for(i = 8; i >= 0; i--) {
4614 v = ldub(ptr + i);
4615 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4616 }
4617 tmp = val;
4618 if (ldub(ptr + 9) & 0x80)
4619 tmp = -tmp;
4620 fpush();
4621 ST0 = tmp;
4622}
4623
4624void helper_fbst_ST0(target_ulong ptr)
4625{
4626 int v;
4627 target_ulong mem_ref, mem_end;
4628 int64_t val;
4629
4630 val = floatx_to_int64(ST0, &env->fp_status);
4631 mem_ref = ptr;
4632 mem_end = mem_ref + 9;
4633 if (val < 0) {
4634 stb(mem_end, 0x80);
4635 val = -val;
4636 } else {
4637 stb(mem_end, 0x00);
4638 }
4639 while (mem_ref < mem_end) {
4640 if (val == 0)
4641 break;
4642 v = val % 100;
4643 val = val / 100;
4644 v = ((v / 10) << 4) | (v % 10);
4645 stb(mem_ref++, v);
4646 }
4647 while (mem_ref < mem_end) {
4648 stb(mem_ref++, 0);
4649 }
4650}
4651
4652void helper_f2xm1(void)
4653{
4654 ST0 = pow(2.0,ST0) - 1.0;
4655}
4656
4657void helper_fyl2x(void)
4658{
4659 CPU86_LDouble fptemp;
4660
4661 fptemp = ST0;
4662 if (fptemp>0.0){
4663 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4664 ST1 *= fptemp;
4665 fpop();
4666 } else {
4667 env->fpus &= (~0x4700);
4668 env->fpus |= 0x400;
4669 }
4670}
4671
4672void helper_fptan(void)
4673{
4674 CPU86_LDouble fptemp;
4675
4676 fptemp = ST0;
4677 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4678 env->fpus |= 0x400;
4679 } else {
4680 ST0 = tan(fptemp);
4681 fpush();
4682 ST0 = 1.0;
4683 env->fpus &= (~0x400); /* C2 <-- 0 */
4684 /* the above code is for |arg| < 2**52 only */
4685 }
4686}
4687
4688void helper_fpatan(void)
4689{
4690 CPU86_LDouble fptemp, fpsrcop;
4691
4692 fpsrcop = ST1;
4693 fptemp = ST0;
4694 ST1 = atan2(fpsrcop,fptemp);
4695 fpop();
4696}
4697
4698void helper_fxtract(void)
4699{
4700 CPU86_LDoubleU temp;
4701 unsigned int expdif;
4702
4703 temp.d = ST0;
4704 expdif = EXPD(temp) - EXPBIAS;
4705 /*DP exponent bias*/
4706 ST0 = expdif;
4707 fpush();
4708 BIASEXPONENT(temp);
4709 ST0 = temp.d;
4710}
4711
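/* FPREM1 computes the IEEE remainder: the partial quotient below is rounded
   to the nearest integer, whereas FPREM (further down) truncates it towards
   zero. */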
4712void helper_fprem1(void)
4713{
4714 CPU86_LDouble dblq, fpsrcop, fptemp;
4715 CPU86_LDoubleU fpsrcop1, fptemp1;
4716 int expdif;
4717 signed long long int q;
4718
4719#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4720 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4721#else
4722 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4723#endif
4724 ST0 = 0.0 / 0.0; /* NaN */
4725 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4726 return;
4727 }
4728
4729 fpsrcop = ST0;
4730 fptemp = ST1;
4731 fpsrcop1.d = fpsrcop;
4732 fptemp1.d = fptemp;
4733 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4734
4735 if (expdif < 0) {
4736 /* optimisation? taken from the AMD docs */
4737 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4738 /* ST0 is unchanged */
4739 return;
4740 }
4741
4742 if (expdif < 53) {
4743 dblq = fpsrcop / fptemp;
4744 /* round dblq towards nearest integer */
4745 dblq = rint(dblq);
4746 ST0 = fpsrcop - fptemp * dblq;
4747
4748 /* convert dblq to q by truncating towards zero */
4749 if (dblq < 0.0)
4750 q = (signed long long int)(-dblq);
4751 else
4752 q = (signed long long int)dblq;
4753
4754 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4755 /* (C0,C3,C1) <-- (q2,q1,q0) */
4756 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4757 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4758 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4759 } else {
4760 env->fpus |= 0x400; /* C2 <-- 1 */
4761 fptemp = pow(2.0, expdif - 50);
4762 fpsrcop = (ST0 / ST1) / fptemp;
4763 /* fpsrcop = integer obtained by chopping */
4764 fpsrcop = (fpsrcop < 0.0) ?
4765 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4766 ST0 -= (ST1 * fpsrcop * fptemp);
4767 }
4768}
4769
4770void helper_fprem(void)
4771{
4772 CPU86_LDouble dblq, fpsrcop, fptemp;
4773 CPU86_LDoubleU fpsrcop1, fptemp1;
4774 int expdif;
4775 signed long long int q;
4776
4777#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4778 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4779#else
4780 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4781#endif
4782 ST0 = 0.0 / 0.0; /* NaN */
4783 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4784 return;
4785 }
4786
4787 fpsrcop = (CPU86_LDouble)ST0;
4788 fptemp = (CPU86_LDouble)ST1;
4789 fpsrcop1.d = fpsrcop;
4790 fptemp1.d = fptemp;
4791 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4792
4793 if (expdif < 0) {
4794 /* optimisation? taken from the AMD docs */
4795 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4796 /* ST0 is unchanged */
4797 return;
4798 }
4799
4800 if ( expdif < 53 ) {
4801 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4802 /* round dblq towards zero */
4803 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4804 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4805
4806 /* convert dblq to q by truncating towards zero */
4807 if (dblq < 0.0)
4808 q = (signed long long int)(-dblq);
4809 else
4810 q = (signed long long int)dblq;
4811
4812 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4813 /* (C0,C3,C1) <-- (q2,q1,q0) */
4814 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4815 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4816 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4817 } else {
4818 int N = 32 + (expdif % 32); /* as per AMD docs */
4819 env->fpus |= 0x400; /* C2 <-- 1 */
4820 fptemp = pow(2.0, (double)(expdif - N));
4821 fpsrcop = (ST0 / ST1) / fptemp;
4822 /* fpsrcop = integer obtained by chopping */
4823 fpsrcop = (fpsrcop < 0.0) ?
4824 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4825 ST0 -= (ST1 * fpsrcop * fptemp);
4826 }
4827}
4828
4829void helper_fyl2xp1(void)
4830{
4831 CPU86_LDouble fptemp;
4832
4833 fptemp = ST0;
4834 if ((fptemp+1.0)>0.0) {
4835 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4836 ST1 *= fptemp;
4837 fpop();
4838 } else {
4839 env->fpus &= (~0x4700);
4840 env->fpus |= 0x400;
4841 }
4842}
4843
4844void helper_fsqrt(void)
4845{
4846 CPU86_LDouble fptemp;
4847
4848 fptemp = ST0;
4849 if (fptemp<0.0) {
4850 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4851 env->fpus |= 0x400;
4852 }
4853 ST0 = sqrt(fptemp);
4854}
4855
4856void helper_fsincos(void)
4857{
4858 CPU86_LDouble fptemp;
4859
4860 fptemp = ST0;
4861 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4862 env->fpus |= 0x400;
4863 } else {
4864 ST0 = sin(fptemp);
4865 fpush();
4866 ST0 = cos(fptemp);
4867 env->fpus &= (~0x400); /* C2 <-- 0 */
4868 /* the above code is for |arg| < 2**63 only */
4869 }
4870}
4871
4872void helper_frndint(void)
4873{
4874 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4875}
4876
4877void helper_fscale(void)
4878{
4879 ST0 = ldexp (ST0, (int)(ST1));
4880}
4881
4882void helper_fsin(void)
4883{
4884 CPU86_LDouble fptemp;
4885
4886 fptemp = ST0;
4887 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4888 env->fpus |= 0x400;
4889 } else {
4890 ST0 = sin(fptemp);
4891 env->fpus &= (~0x400); /* C2 <-- 0 */
4892 /* the above code is for |arg| < 2**53 only */
4893 }
4894}
4895
4896void helper_fcos(void)
4897{
4898 CPU86_LDouble fptemp;
4899
4900 fptemp = ST0;
4901 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4902 env->fpus |= 0x400;
4903 } else {
4904 ST0 = cos(fptemp);
4905 env->fpus &= (~0x400); /* C2 <-- 0 */
 4906 /* the above code is for |arg| < 2**63 only */
4907 }
4908}
4909
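/* FXAM classifies ST0 into (C3,C2,C0): 001 NaN, 010 normal finite,
   011 infinity, 100 zero, 110 denormal; C1 is set to the sign bit. */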
4910void helper_fxam_ST0(void)
4911{
4912 CPU86_LDoubleU temp;
4913 int expdif;
4914
4915 temp.d = ST0;
4916
4917 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4918 if (SIGND(temp))
4919 env->fpus |= 0x200; /* C1 <-- 1 */
4920
4921 /* XXX: test fptags too */
4922 expdif = EXPD(temp);
4923 if (expdif == MAXEXPD) {
4924#ifdef USE_X86LDOUBLE
4925 if (MANTD(temp) == 0x8000000000000000ULL)
4926#else
4927 if (MANTD(temp) == 0)
4928#endif
4929 env->fpus |= 0x500 /*Infinity*/;
4930 else
4931 env->fpus |= 0x100 /*NaN*/;
4932 } else if (expdif == 0) {
4933 if (MANTD(temp) == 0)
4934 env->fpus |= 0x4000 /*Zero*/;
4935 else
4936 env->fpus |= 0x4400 /*Denormal*/;
4937 } else {
4938 env->fpus |= 0x400;
4939 }
4940}
4941
4942void helper_fstenv(target_ulong ptr, int data32)
4943{
4944 int fpus, fptag, exp, i;
4945 uint64_t mant;
4946 CPU86_LDoubleU tmp;
4947
4948 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4949 fptag = 0;
4950 for (i=7; i>=0; i--) {
4951 fptag <<= 2;
4952 if (env->fptags[i]) {
4953 fptag |= 3;
4954 } else {
4955 tmp.d = env->fpregs[i].d;
4956 exp = EXPD(tmp);
4957 mant = MANTD(tmp);
4958 if (exp == 0 && mant == 0) {
4959 /* zero */
4960 fptag |= 1;
4961 } else if (exp == 0 || exp == MAXEXPD
4962#ifdef USE_X86LDOUBLE
4963 || (mant & (1LL << 63)) == 0
4964#endif
4965 ) {
4966 /* NaNs, infinity, denormal */
4967 fptag |= 2;
4968 }
4969 }
4970 }
4971 if (data32) {
4972 /* 32 bit */
4973 stl(ptr, env->fpuc);
4974 stl(ptr + 4, fpus);
4975 stl(ptr + 8, fptag);
4976 stl(ptr + 12, 0); /* fpip */
4977 stl(ptr + 16, 0); /* fpcs */
4978 stl(ptr + 20, 0); /* fpoo */
4979 stl(ptr + 24, 0); /* fpos */
4980 } else {
4981 /* 16 bit */
4982 stw(ptr, env->fpuc);
4983 stw(ptr + 2, fpus);
4984 stw(ptr + 4, fptag);
4985 stw(ptr + 6, 0);
4986 stw(ptr + 8, 0);
4987 stw(ptr + 10, 0);
4988 stw(ptr + 12, 0);
4989 }
4990}
4991
4992void helper_fldenv(target_ulong ptr, int data32)
4993{
4994 int i, fpus, fptag;
4995
4996 if (data32) {
4997 env->fpuc = lduw(ptr);
4998 fpus = lduw(ptr + 4);
4999 fptag = lduw(ptr + 8);
5000 }
5001 else {
5002 env->fpuc = lduw(ptr);
5003 fpus = lduw(ptr + 2);
5004 fptag = lduw(ptr + 4);
5005 }
5006 env->fpstt = (fpus >> 11) & 7;
5007 env->fpus = fpus & ~0x3800;
5008 for(i = 0;i < 8; i++) {
5009 env->fptags[i] = ((fptag & 3) == 3);
5010 fptag >>= 2;
5011 }
5012}
5013
5014void helper_fsave(target_ulong ptr, int data32)
5015{
5016 CPU86_LDouble tmp;
5017 int i;
5018
5019 helper_fstenv(ptr, data32);
5020
5021 ptr += (14 << data32);
5022 for(i = 0;i < 8; i++) {
5023 tmp = ST(i);
5024 helper_fstt(tmp, ptr);
5025 ptr += 10;
5026 }
5027
5028 /* fninit */
5029 env->fpus = 0;
5030 env->fpstt = 0;
5031 env->fpuc = 0x37f;
5032 env->fptags[0] = 1;
5033 env->fptags[1] = 1;
5034 env->fptags[2] = 1;
5035 env->fptags[3] = 1;
5036 env->fptags[4] = 1;
5037 env->fptags[5] = 1;
5038 env->fptags[6] = 1;
5039 env->fptags[7] = 1;
5040}
5041
5042void helper_frstor(target_ulong ptr, int data32)
5043{
5044 CPU86_LDouble tmp;
5045 int i;
5046
5047 helper_fldenv(ptr, data32);
5048 ptr += (14 << data32);
5049
5050 for(i = 0;i < 8; i++) {
5051 tmp = helper_fldt(ptr);
5052 ST(i) = tmp;
5053 ptr += 10;
5054 }
5055}
5056
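/* FXSAVE image layout used below: +0x00 FCW, +0x02 FSW, +0x04 abridged tag
   word (one 'valid' bit per register, hence the ^ 0xff), +0x08 IP/DP fields,
   +0x18 MXCSR, +0x1c MXCSR_MASK, +0x20 ST0..ST7 in 16-byte slots and
   +0xa0 the XMM registers. */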
5057void helper_fxsave(target_ulong ptr, int data64)
5058{
5059 int fpus, fptag, i, nb_xmm_regs;
5060 CPU86_LDouble tmp;
5061 target_ulong addr;
5062
5063 /* The operand must be 16 byte aligned */
5064 if (ptr & 0xf) {
5065 raise_exception(EXCP0D_GPF);
5066 }
5067
5068 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5069 fptag = 0;
5070 for(i = 0; i < 8; i++) {
5071 fptag |= (env->fptags[i] << i);
5072 }
5073 stw(ptr, env->fpuc);
5074 stw(ptr + 2, fpus);
5075 stw(ptr + 4, fptag ^ 0xff);
5076#ifdef TARGET_X86_64
5077 if (data64) {
5078 stq(ptr + 0x08, 0); /* rip */
5079 stq(ptr + 0x10, 0); /* rdp */
5080 } else
5081#endif
5082 {
5083 stl(ptr + 0x08, 0); /* eip */
5084 stl(ptr + 0x0c, 0); /* sel */
5085 stl(ptr + 0x10, 0); /* dp */
5086 stl(ptr + 0x14, 0); /* sel */
5087 }
5088
5089 addr = ptr + 0x20;
5090 for(i = 0;i < 8; i++) {
5091 tmp = ST(i);
5092 helper_fstt(tmp, addr);
5093 addr += 16;
5094 }
5095
5096 if (env->cr[4] & CR4_OSFXSR_MASK) {
5097 /* XXX: finish it */
5098 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5099 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5100 if (env->hflags & HF_CS64_MASK)
5101 nb_xmm_regs = 16;
5102 else
5103 nb_xmm_regs = 8;
5104 addr = ptr + 0xa0;
5105 /* Fast FXSAVE leaves out the XMM registers */
5106 if (!(env->efer & MSR_EFER_FFXSR)
5107 || (env->hflags & HF_CPL_MASK)
5108 || !(env->hflags & HF_LMA_MASK)) {
5109 for(i = 0; i < nb_xmm_regs; i++) {
5110 stq(addr, env->xmm_regs[i].XMM_Q(0));
5111 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5112 addr += 16;
5113 }
5114 }
5115 }
5116}
5117
5118void helper_fxrstor(target_ulong ptr, int data64)
5119{
5120 int i, fpus, fptag, nb_xmm_regs;
5121 CPU86_LDouble tmp;
5122 target_ulong addr;
5123
5124 /* The operand must be 16 byte aligned */
5125 if (ptr & 0xf) {
5126 raise_exception(EXCP0D_GPF);
5127 }
5128
5129 env->fpuc = lduw(ptr);
5130 fpus = lduw(ptr + 2);
5131 fptag = lduw(ptr + 4);
5132 env->fpstt = (fpus >> 11) & 7;
5133 env->fpus = fpus & ~0x3800;
5134 fptag ^= 0xff;
5135 for(i = 0;i < 8; i++) {
5136 env->fptags[i] = ((fptag >> i) & 1);
5137 }
5138
5139 addr = ptr + 0x20;
5140 for(i = 0;i < 8; i++) {
5141 tmp = helper_fldt(addr);
5142 ST(i) = tmp;
5143 addr += 16;
5144 }
5145
5146 if (env->cr[4] & CR4_OSFXSR_MASK) {
5147 /* XXX: finish it */
5148 env->mxcsr = ldl(ptr + 0x18);
5149 //ldl(ptr + 0x1c);
5150 if (env->hflags & HF_CS64_MASK)
5151 nb_xmm_regs = 16;
5152 else
5153 nb_xmm_regs = 8;
5154 addr = ptr + 0xa0;
5155 /* Fast FXRESTORE leaves out the XMM registers */
5156 if (!(env->efer & MSR_EFER_FFXSR)
5157 || (env->hflags & HF_CPL_MASK)
5158 || !(env->hflags & HF_LMA_MASK)) {
5159 for(i = 0; i < nb_xmm_regs; i++) {
5160#if !defined(VBOX) || __GNUC__ < 4
5161 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5162 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5163#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5164# if 1
5165 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5166 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5167 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5168 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5169# else
5170 /* this works fine on Mac OS X, gcc 4.0.1 */
5171 uint64_t u64 = ldq(addr);
 5172 env->xmm_regs[i].XMM_Q(0) = u64;
 5173 u64 = ldq(addr + 8);
5174 env->xmm_regs[i].XMM_Q(1) = u64;
5175# endif
5176#endif
5177 addr += 16;
5178 }
5179 }
5180 }
5181}
5182
5183#ifndef USE_X86LDOUBLE
5184
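/* Without USE_X86LDOUBLE the FPU registers are kept as 64-bit doubles, so
   the 80-bit extended values found in FSAVE/FXSAVE images have to be
   converted on the fly; some precision is necessarily lost. */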
5185void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5186{
5187 CPU86_LDoubleU temp;
5188 int e;
5189
5190 temp.d = f;
5191 /* mantissa */
5192 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5193 /* exponent + sign */
5194 e = EXPD(temp) - EXPBIAS + 16383;
5195 e |= SIGND(temp) >> 16;
5196 *pexp = e;
5197}
5198
5199CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5200{
5201 CPU86_LDoubleU temp;
5202 int e;
5203 uint64_t ll;
5204
5205 /* XXX: handle overflow ? */
5206 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5207 e |= (upper >> 4) & 0x800; /* sign */
5208 ll = (mant >> 11) & ((1LL << 52) - 1);
5209#ifdef __arm__
5210 temp.l.upper = (e << 20) | (ll >> 32);
5211 temp.l.lower = ll;
5212#else
5213 temp.ll = ll | ((uint64_t)e << 52);
5214#endif
5215 return temp.d;
5216}
5217
5218#else
5219
5220void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5221{
5222 CPU86_LDoubleU temp;
5223
5224 temp.d = f;
5225 *pmant = temp.l.lower;
5226 *pexp = temp.l.upper;
5227}
5228
5229CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5230{
5231 CPU86_LDoubleU temp;
5232
5233 temp.l.upper = upper;
5234 temp.l.lower = mant;
5235 return temp.d;
5236}
5237#endif
5238
5239#ifdef TARGET_X86_64
5240
5241//#define DEBUG_MULDIV
5242
5243static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5244{
5245 *plow += a;
5246 /* carry test */
5247 if (*plow < a)
5248 (*phigh)++;
5249 *phigh += b;
5250}
5251
5252static void neg128(uint64_t *plow, uint64_t *phigh)
5253{
5254 *plow = ~ *plow;
5255 *phigh = ~ *phigh;
5256 add128(plow, phigh, 1, 0);
5257}
5258
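/* 128-by-64 bit unsigned division done with a simple shift-and-subtract loop
   (one quotient bit per iteration); the quotient ends up in *plow and the
   remainder in *phigh. */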
5259/* return TRUE if overflow */
5260static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5261{
5262 uint64_t q, r, a1, a0;
5263 int i, qb, ab;
5264
5265 a0 = *plow;
5266 a1 = *phigh;
5267 if (a1 == 0) {
5268 q = a0 / b;
5269 r = a0 % b;
5270 *plow = q;
5271 *phigh = r;
5272 } else {
5273 if (a1 >= b)
5274 return 1;
5275 /* XXX: use a better algorithm */
5276 for(i = 0; i < 64; i++) {
5277 ab = a1 >> 63;
5278 a1 = (a1 << 1) | (a0 >> 63);
5279 if (ab || a1 >= b) {
5280 a1 -= b;
5281 qb = 1;
5282 } else {
5283 qb = 0;
5284 }
5285 a0 = (a0 << 1) | qb;
5286 }
5287#if defined(DEBUG_MULDIV)
5288 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5289 *phigh, *plow, b, a0, a1);
5290#endif
5291 *plow = a0;
5292 *phigh = a1;
5293 }
5294 return 0;
5295}
5296
5297/* return TRUE if overflow */
5298static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5299{
5300 int sa, sb;
5301 sa = ((int64_t)*phigh < 0);
5302 if (sa)
5303 neg128(plow, phigh);
5304 sb = (b < 0);
5305 if (sb)
5306 b = -b;
5307 if (div64(plow, phigh, b) != 0)
5308 return 1;
5309 if (sa ^ sb) {
5310 if (*plow > (1ULL << 63))
5311 return 1;
5312 *plow = - *plow;
5313 } else {
5314 if (*plow >= (1ULL << 63))
5315 return 1;
5316 }
5317 if (sa)
5318 *phigh = - *phigh;
5319 return 0;
5320}
5321
5322void helper_mulq_EAX_T0(target_ulong t0)
5323{
5324 uint64_t r0, r1;
5325
5326 mulu64(&r0, &r1, EAX, t0);
5327 EAX = r0;
5328 EDX = r1;
5329 CC_DST = r0;
5330 CC_SRC = r1;
5331}
5332
5333void helper_imulq_EAX_T0(target_ulong t0)
5334{
5335 uint64_t r0, r1;
5336
5337 muls64(&r0, &r1, EAX, t0);
5338 EAX = r0;
5339 EDX = r1;
5340 CC_DST = r0;
5341 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5342}
5343
5344target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5345{
5346 uint64_t r0, r1;
5347
5348 muls64(&r0, &r1, t0, t1);
5349 CC_DST = r0;
5350 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5351 return r0;
5352}
5353
5354void helper_divq_EAX(target_ulong t0)
5355{
5356 uint64_t r0, r1;
5357 if (t0 == 0) {
5358 raise_exception(EXCP00_DIVZ);
5359 }
5360 r0 = EAX;
5361 r1 = EDX;
5362 if (div64(&r0, &r1, t0))
5363 raise_exception(EXCP00_DIVZ);
5364 EAX = r0;
5365 EDX = r1;
5366}
5367
5368void helper_idivq_EAX(target_ulong t0)
5369{
5370 uint64_t r0, r1;
5371 if (t0 == 0) {
5372 raise_exception(EXCP00_DIVZ);
5373 }
5374 r0 = EAX;
5375 r1 = EDX;
5376 if (idiv64(&r0, &r1, t0))
5377 raise_exception(EXCP00_DIVZ);
5378 EAX = r0;
5379 EDX = r1;
5380}
5381#endif
5382
5383static void do_hlt(void)
5384{
5385 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5386 env->halted = 1;
5387 env->exception_index = EXCP_HLT;
5388 cpu_loop_exit();
5389}
5390
5391void helper_hlt(int next_eip_addend)
5392{
5393 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5394 EIP += next_eip_addend;
5395
5396 do_hlt();
5397}
5398
5399void helper_monitor(target_ulong ptr)
5400{
5401#ifdef VBOX
5402 if ((uint32_t)ECX > 1)
5403 raise_exception(EXCP0D_GPF);
5404#else /* !VBOX */
5405 if ((uint32_t)ECX != 0)
5406 raise_exception(EXCP0D_GPF);
5407#endif /* !VBOX */
5408 /* XXX: store address ? */
5409 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5410}
5411
5412void helper_mwait(int next_eip_addend)
5413{
5414 if ((uint32_t)ECX != 0)
5415 raise_exception(EXCP0D_GPF);
5416#ifdef VBOX
5417 helper_hlt(next_eip_addend);
5418#else /* !VBOX */
5419 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5420 EIP += next_eip_addend;
5421
5422 /* XXX: not complete but not completely erroneous */
5423 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5424 /* more than one CPU: do not sleep because another CPU may
5425 wake this one */
5426 } else {
5427 do_hlt();
5428 }
5429#endif /* !VBOX */
5430}
5431
5432void helper_debug(void)
5433{
5434 env->exception_index = EXCP_DEBUG;
5435 cpu_loop_exit();
5436}
5437
5438void helper_reset_rf(void)
5439{
5440 env->eflags &= ~RF_MASK;
5441}
5442
5443void helper_raise_interrupt(int intno, int next_eip_addend)
5444{
5445 raise_interrupt(intno, 1, 0, next_eip_addend);
5446}
5447
5448void helper_raise_exception(int exception_index)
5449{
5450 raise_exception(exception_index);
5451}
5452
5453void helper_cli(void)
5454{
5455 env->eflags &= ~IF_MASK;
5456}
5457
5458void helper_sti(void)
5459{
5460 env->eflags |= IF_MASK;
5461}
5462
5463#ifdef VBOX
5464void helper_cli_vme(void)
5465{
5466 env->eflags &= ~VIF_MASK;
5467}
5468
5469void helper_sti_vme(void)
5470{
5471 /* First check, then change eflags according to the AMD manual */
5472 if (env->eflags & VIP_MASK) {
5473 raise_exception(EXCP0D_GPF);
5474 }
5475 env->eflags |= VIF_MASK;
5476}
5477#endif /* VBOX */
5478
5479#if 0
5480/* vm86plus instructions */
5481void helper_cli_vm(void)
5482{
5483 env->eflags &= ~VIF_MASK;
5484}
5485
5486void helper_sti_vm(void)
5487{
5488 env->eflags |= VIF_MASK;
5489 if (env->eflags & VIP_MASK) {
5490 raise_exception(EXCP0D_GPF);
5491 }
5492}
5493#endif
5494
5495void helper_set_inhibit_irq(void)
5496{
5497 env->hflags |= HF_INHIBIT_IRQ_MASK;
5498}
5499
5500void helper_reset_inhibit_irq(void)
5501{
5502 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5503}
5504
5505void helper_boundw(target_ulong a0, int v)
5506{
5507 int low, high;
5508 low = ldsw(a0);
5509 high = ldsw(a0 + 2);
5510 v = (int16_t)v;
5511 if (v < low || v > high) {
5512 raise_exception(EXCP05_BOUND);
5513 }
5514}
5515
5516void helper_boundl(target_ulong a0, int v)
5517{
5518 int low, high;
5519 low = ldl(a0);
5520 high = ldl(a0 + 4);
5521 if (v < low || v > high) {
5522 raise_exception(EXCP05_BOUND);
5523 }
5524}
5525
5526static float approx_rsqrt(float a)
5527{
5528 return 1.0 / sqrt(a);
5529}
5530
5531static float approx_rcp(float a)
5532{
5533 return 1.0 / a;
5534}
5535
5536#if !defined(CONFIG_USER_ONLY)
5537
5538#define MMUSUFFIX _mmu
5539
5540#define SHIFT 0
5541#include "softmmu_template.h"
5542
5543#define SHIFT 1
5544#include "softmmu_template.h"
5545
5546#define SHIFT 2
5547#include "softmmu_template.h"
5548
5549#define SHIFT 3
5550#include "softmmu_template.h"
5551
5552#endif
5553
5554#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5555/* This code assumes that a real physical address always fits into a host CPU
5556 register, which is wrong in general but true for our current use cases. */
5557RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5558{
5559 return remR3PhysReadS8(addr);
5560}
5561RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5562{
5563 return remR3PhysReadU8(addr);
5564}
5565void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5566{
5567 remR3PhysWriteU8(addr, val);
5568}
5569RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5570{
5571 return remR3PhysReadS16(addr);
5572}
5573RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5574{
5575 return remR3PhysReadU16(addr);
5576}
5577void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5578{
5579 remR3PhysWriteU16(addr, val);
5580}
5581RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5582{
5583 return remR3PhysReadS32(addr);
5584}
5585RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5586{
5587 return remR3PhysReadU32(addr);
5588}
5589void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5590{
5591 remR3PhysWriteU32(addr, val);
5592}
5593uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5594{
5595 return remR3PhysReadU64(addr);
5596}
5597void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5598{
5599 remR3PhysWriteU64(addr, val);
5600}
5601#endif /* VBOX */
5602
5603#if !defined(CONFIG_USER_ONLY)
5604/* try to fill the TLB and raise an exception on error. If retaddr is
5605 NULL, it means that the function was called in C code (i.e. not
5606 from generated code or from helper.c) */
5607/* XXX: fix it to restore all registers */
5608void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5609{
5610 TranslationBlock *tb;
5611 int ret;
5612 unsigned long pc;
5613 CPUX86State *saved_env;
5614
5615 /* XXX: hack to restore env in all cases, even if not called from
5616 generated code */
5617 saved_env = env;
5618 env = cpu_single_env;
5619
5620 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5621 if (ret) {
5622 if (retaddr) {
5623 /* now we have a real cpu fault */
5624 pc = (unsigned long)retaddr;
5625 tb = tb_find_pc(pc);
5626 if (tb) {
5627 /* the PC is inside the translated code. It means that we have
5628 a virtual CPU fault */
5629 cpu_restore_state(tb, env, pc, NULL);
5630 }
5631 }
5632 raise_exception_err(env->exception_index, env->error_code);
5633 }
5634 env = saved_env;
5635}
5636#endif
5637
5638#ifdef VBOX
5639
5640/**
5641 * Correctly computes the eflags.
5642 * @returns eflags.
5643 * @param env1 CPU environment.
5644 */
5645uint32_t raw_compute_eflags(CPUX86State *env1)
5646{
5647 CPUX86State *savedenv = env;
5648 uint32_t efl;
5649 env = env1;
5650 efl = compute_eflags();
5651 env = savedenv;
5652 return efl;
5653}
5654
5655/**
5656 * Reads byte from virtual address in guest memory area.
5657 * XXX: is it working for any addresses? swapped out pages?
5658 * @returns read data byte.
5659 * @param env1 CPU environment.
5660 * @param pvAddr GC Virtual address.
5661 */
5662uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5663{
5664 CPUX86State *savedenv = env;
5665 uint8_t u8;
5666 env = env1;
5667 u8 = ldub_kernel(addr);
5668 env = savedenv;
5669 return u8;
5670}
5671
5672/**
 5673 * Reads a 16-bit word from virtual address in guest memory area.
 5674 * XXX: is it working for any addresses? swapped out pages?
 5675 * @returns read data word.
5676 * @param env1 CPU environment.
5677 * @param pvAddr GC Virtual address.
5678 */
5679uint16_t read_word(CPUX86State *env1, target_ulong addr)
5680{
5681 CPUX86State *savedenv = env;
5682 uint16_t u16;
5683 env = env1;
5684 u16 = lduw_kernel(addr);
5685 env = savedenv;
5686 return u16;
5687}
5688
5689/**
 5690 * Reads a 32-bit dword from virtual address in guest memory area.
 5691 * XXX: is it working for any addresses? swapped out pages?
 5692 * @returns read data dword.
5693 * @param env1 CPU environment.
5694 * @param pvAddr GC Virtual address.
5695 */
5696uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5697{
5698 CPUX86State *savedenv = env;
5699 uint32_t u32;
5700 env = env1;
5701 u32 = ldl_kernel(addr);
5702 env = savedenv;
5703 return u32;
5704}
5705
5706/**
 5707 * Writes a byte to virtual address in guest memory area.
 5708 * XXX: is it working for any addresses? swapped out pages?
5710 * @param env1 CPU environment.
5711 * @param pvAddr GC Virtual address.
5712 * @param val byte value
5713 */
5714void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5715{
5716 CPUX86State *savedenv = env;
5717 env = env1;
5718 stb(addr, val);
5719 env = savedenv;
5720}
5721
5722void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5723{
5724 CPUX86State *savedenv = env;
5725 env = env1;
5726 stw(addr, val);
5727 env = savedenv;
5728}
5729
5730void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5731{
5732 CPUX86State *savedenv = env;
5733 env = env1;
5734 stl(addr, val);
5735 env = savedenv;
5736}
5737
5738/**
 5739 * Correctly loads a selector into a segment register, updating the internal
 5740 * qemu data/caches.
5741 * @param env1 CPU environment.
5742 * @param seg_reg Segment register.
5743 * @param selector Selector to load.
5744 */
5745void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5746{
5747 CPUX86State *savedenv = env;
5748#ifdef FORCE_SEGMENT_SYNC
5749 jmp_buf old_buf;
5750#endif
5751
5752 env = env1;
5753
5754 if ( env->eflags & X86_EFL_VM
5755 || !(env->cr[0] & X86_CR0_PE))
5756 {
5757 load_seg_vm(seg_reg, selector);
5758
5759 env = savedenv;
5760
5761 /* Successful sync. */
5762 Assert(env1->segs[seg_reg].newselector == 0);
5763 }
5764 else
5765 {
 5766 /* For some reason this works even without saving/restoring the jump buffer, and as this
 5767 code is time critical we don't do it. */
5768#ifdef FORCE_SEGMENT_SYNC
5769 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5770#endif
5771 if (setjmp(env1->jmp_env) == 0)
5772 {
5773 if (seg_reg == R_CS)
5774 {
5775 uint32_t e1, e2;
5776 e1 = e2 = 0;
5777 load_segment(&e1, &e2, selector);
5778 cpu_x86_load_seg_cache(env, R_CS, selector,
5779 get_seg_base(e1, e2),
5780 get_seg_limit(e1, e2),
5781 e2);
5782 }
5783 else
5784 helper_load_seg(seg_reg, selector);
 5785 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
 5786 loading 0 selectors, which in turn led to subtle problems like #3588 */
5787
5788 env = savedenv;
5789
5790 /* Successful sync. */
5791 Assert(env1->segs[seg_reg].newselector == 0);
5792 }
5793 else
5794 {
5795 env = savedenv;
5796
5797 /* Postpone sync until the guest uses the selector. */
5798 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5799 env1->segs[seg_reg].newselector = selector;
5800 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5801 env1->exception_index = -1;
5802 env1->error_code = 0;
5803 env1->old_exception = -1;
5804 }
5805#ifdef FORCE_SEGMENT_SYNC
5806 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5807#endif
5808 }
5809
5810}
5811
5812DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5813{
5814 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5815}
5816
5817
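/**
 * Translates and executes exactly one guest instruction at the current CS:EIP
 * using the recompiler, then discards the temporary translation block again.
 * Pending external interrupts and TLB flush requests are honoured, and one
 * more instruction is emulated when interrupt inhibition (e.g. after sti or
 * mov ss) is in effect.
 */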
5818int emulate_single_instr(CPUX86State *env1)
5819{
5820 TranslationBlock *tb;
5821 TranslationBlock *current;
5822 int flags;
5823 uint8_t *tc_ptr;
5824 target_ulong old_eip;
5825
5826 /* ensures env is loaded! */
5827 CPUX86State *savedenv = env;
5828 env = env1;
5829
5830 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5831
5832 current = env->current_tb;
5833 env->current_tb = NULL;
5834 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5835
5836 /*
5837 * Translate only one instruction.
5838 */
5839 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5840 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5841 env->segs[R_CS].base, flags, 0);
5842
5843 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5844
5845
5846 /* tb_link_phys: */
5847 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5848 tb->jmp_next[0] = NULL;
5849 tb->jmp_next[1] = NULL;
5850 Assert(tb->jmp_next[0] == NULL);
5851 Assert(tb->jmp_next[1] == NULL);
5852 if (tb->tb_next_offset[0] != 0xffff)
5853 tb_reset_jump(tb, 0);
5854 if (tb->tb_next_offset[1] != 0xffff)
5855 tb_reset_jump(tb, 1);
5856
5857 /*
5858 * Execute it using emulation
5859 */
5860 old_eip = env->eip;
5861 env->current_tb = tb;
5862
5863 /*
 5864 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
 5865 * Perhaps not a very safe hack.
5866 */
5867 while (old_eip == env->eip)
5868 {
5869 tc_ptr = tb->tc_ptr;
5870
5871#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5872 int fake_ret;
5873 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5874#else
5875 tcg_qemu_tb_exec(tc_ptr);
5876#endif
5877
5878 /*
5879 * Exit once we detect an external interrupt and interrupts are enabled
5880 */
5881 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
5882 || ( (env->eflags & IF_MASK)
5883 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
5884 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
5885 )
5886 {
5887 break;
5888 }
5889 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
5890 tlb_flush(env, true);
5891 }
5892 }
5893 env->current_tb = current;
5894
5895 tb_phys_invalidate(tb, -1);
5896 tb_free(tb);
5897/*
5898 Assert(tb->tb_next_offset[0] == 0xffff);
5899 Assert(tb->tb_next_offset[1] == 0xffff);
5900 Assert(tb->tb_next[0] == 0xffff);
5901 Assert(tb->tb_next[1] == 0xffff);
5902 Assert(tb->jmp_next[0] == NULL);
5903 Assert(tb->jmp_next[1] == NULL);
5904 Assert(tb->jmp_first == NULL); */
5905
5906 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5907
5908 /*
5909 * Execute the next instruction when we encounter instruction fusing.
5910 */
5911 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5912 {
5913 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5914 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5915 emulate_single_instr(env);
5916 }
5917
5918 env = savedenv;
5919 return 0;
5920}
5921
5922/**
5923 * Correctly loads a new ldtr selector.
5924 *
5925 * @param env1 CPU environment.
5926 * @param selector Selector to load.
5927 */
5928void sync_ldtr(CPUX86State *env1, int selector)
5929{
5930 CPUX86State *saved_env = env;
5931 if (setjmp(env1->jmp_env) == 0)
5932 {
5933 env = env1;
5934 helper_lldt(selector);
5935 env = saved_env;
5936 }
5937 else
5938 {
5939 env = saved_env;
5940#ifdef VBOX_STRICT
5941 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5942#endif
5943 }
5944}
5945
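/**
 * Raw-mode variant of get_ss_esp_from_tss: fetches the SS:ESP pair for the
 * given dpl from the current TSS.  Unlike the regular helper it does not
 * raise \#TS on a TSS limit violation but returns 0 (and 1 on success).
 */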
5946int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5947 uint32_t *esp_ptr, int dpl)
5948{
5949 int type, index, shift;
5950
5951 CPUX86State *savedenv = env;
5952 env = env1;
5953
5954 if (!(env->tr.flags & DESC_P_MASK))
5955 cpu_abort(env, "invalid tss");
5956 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5957 if ((type & 7) != 1)
5958 cpu_abort(env, "invalid tss type %d", type);
5959 shift = type >> 3;
5960 index = (dpl * 4 + 2) << shift;
5961 if (index + (4 << shift) - 1 > env->tr.limit)
5962 {
5963 env = savedenv;
5964 return 0;
5965 }
5966 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5967
5968 if (shift == 0) {
5969 *esp_ptr = lduw_kernel(env->tr.base + index);
5970 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5971 } else {
5972 *esp_ptr = ldl_kernel(env->tr.base + index);
5973 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5974 }
5975
5976 env = savedenv;
5977 return 1;
5978}
5979
5980//*****************************************************************************
5981// Needs to be at the bottom of the file (overriding macros)
5982
5983static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5984{
5985#ifdef USE_X86LDOUBLE
5986 CPU86_LDoubleU tmp;
5987 tmp.l.lower = *(uint64_t const *)ptr;
5988 tmp.l.upper = *(uint16_t const *)(ptr + 8);
5989 return tmp.d;
5990#else
5991# error "Busted FPU saving/restoring!"
5992 return *(CPU86_LDouble *)ptr;
5993#endif
5994}
5995
5996static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5997{
5998#ifdef USE_X86LDOUBLE
5999 CPU86_LDoubleU tmp;
6000 tmp.d = f;
6001 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6002 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6003 *(uint16_t *)(ptr + 10) = 0;
6004 *(uint32_t *)(ptr + 12) = 0;
6005 AssertCompile(sizeof(long double) > 8);
6006#else
6007# error "Busted FPU saving/restoring!"
6008 *(CPU86_LDouble *)ptr = f;
6009#endif
6010}
6011
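/* From here on the guest-memory store/load macros are redefined as plain
   host-pointer accesses: the raw FPU state images handled below live in
   host memory, not in the guest address space. */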
6012#undef stw
6013#undef stl
6014#undef stq
6015#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6016#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6017#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6018
6019//*****************************************************************************
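/* Writes env's x87/SSE state out to the raw FXSAVE-style (or legacy FSAVE)
   image at ptr; save_raw_fp_state below does the opposite and loads such an
   image into env.  The names are presumably chosen from the raw-mode
   caller's point of view. */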
6020void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6021{
6022 int fpus, fptag, i, nb_xmm_regs;
6023 CPU86_LDouble tmp;
6024 uint8_t *addr;
6025 int data64 = !!(env->hflags & HF_LMA_MASK);
6026
6027 if (env->cpuid_features & CPUID_FXSR)
6028 {
6029 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6030 fptag = 0;
6031 for(i = 0; i < 8; i++) {
6032 fptag |= (env->fptags[i] << i);
6033 }
6034 stw(ptr, env->fpuc);
6035 stw(ptr + 2, fpus);
6036 stw(ptr + 4, fptag ^ 0xff);
6037
6038 addr = ptr + 0x20;
6039 for(i = 0;i < 8; i++) {
6040 tmp = ST(i);
6041 helper_fstt_raw(tmp, addr);
6042 addr += 16;
6043 }
6044
6045 if (env->cr[4] & CR4_OSFXSR_MASK) {
6046 /* XXX: finish it */
6047 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6048 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6049 nb_xmm_regs = 8 << data64;
6050 addr = ptr + 0xa0;
6051 for(i = 0; i < nb_xmm_regs; i++) {
6052#if __GNUC__ < 4
6053 stq(addr, env->xmm_regs[i].XMM_Q(0));
6054 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6055#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6056 stl(addr, env->xmm_regs[i].XMM_L(0));
6057 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6058 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6059 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6060#endif
6061 addr += 16;
6062 }
6063 }
6064 }
6065 else
6066 {
6067 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6068 int fptag;
6069
6070 fp->FCW = env->fpuc;
6071 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6072 fptag = 0;
6073 for (i=7; i>=0; i--) {
6074 fptag <<= 2;
6075 if (env->fptags[i]) {
6076 fptag |= 3;
6077 } else {
6078 /* the FPU automatically computes it */
6079 }
6080 }
6081 fp->FTW = fptag;
6082
6083 for(i = 0;i < 8; i++) {
6084 tmp = ST(i);
6085 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6086 }
6087 }
6088}
6089
6090//*****************************************************************************
6091#undef lduw
6092#undef ldl
6093#undef ldq
6094#define lduw(a) *(uint16_t *)(a)
6095#define ldl(a) *(uint32_t *)(a)
6096#define ldq(a) *(uint64_t *)(a)
6097//*****************************************************************************
6098void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6099{
6100 int i, fpus, fptag, nb_xmm_regs;
6101 CPU86_LDouble tmp;
6102 uint8_t *addr;
6103 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6104
6105 if (env->cpuid_features & CPUID_FXSR)
6106 {
6107 env->fpuc = lduw(ptr);
6108 fpus = lduw(ptr + 2);
6109 fptag = lduw(ptr + 4);
6110 env->fpstt = (fpus >> 11) & 7;
6111 env->fpus = fpus & ~0x3800;
6112 fptag ^= 0xff;
6113 for(i = 0;i < 8; i++) {
6114 env->fptags[i] = ((fptag >> i) & 1);
6115 }
6116
6117 addr = ptr + 0x20;
6118 for(i = 0;i < 8; i++) {
6119 tmp = helper_fldt_raw(addr);
6120 ST(i) = tmp;
6121 addr += 16;
6122 }
6123
6124 if (env->cr[4] & CR4_OSFXSR_MASK) {
6125 /* XXX: finish it, endianness */
6126 env->mxcsr = ldl(ptr + 0x18);
6127 //ldl(ptr + 0x1c);
6128 nb_xmm_regs = 8 << data64;
6129 addr = ptr + 0xa0;
6130 for(i = 0; i < nb_xmm_regs; i++) {
6131#if HC_ARCH_BITS == 32
6132 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6133 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6134 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6135 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6136 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6137#else
6138 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6139 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6140#endif
6141 addr += 16;
6142 }
6143 }
6144 }
6145 else
6146 {
6147 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6148 int fptag, j;
6149
6150 env->fpuc = fp->FCW;
6151 env->fpstt = (fp->FSW >> 11) & 7;
6152 env->fpus = fp->FSW & ~0x3800;
6153 fptag = fp->FTW;
6154 for(i = 0;i < 8; i++) {
6155 env->fptags[i] = ((fptag & 3) == 3);
6156 fptag >>= 2;
6157 }
6158 j = env->fpstt;
6159 for(i = 0;i < 8; i++) {
6160 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6161 ST(i) = tmp;
6162 }
6163 }
6164}
6165//*****************************************************************************
6166//*****************************************************************************
6167
6168#endif /* VBOX */
6169
6170/* Secure Virtual Machine helpers */
6171
6172#if defined(CONFIG_USER_ONLY)
6173
6174void helper_vmrun(int aflag, int next_eip_addend)
6175{
6176}
6177void helper_vmmcall(void)
6178{
6179}
6180void helper_vmload(int aflag)
6181{
6182}
6183void helper_vmsave(int aflag)
6184{
6185}
6186void helper_stgi(void)
6187{
6188}
6189void helper_clgi(void)
6190{
6191}
6192void helper_skinit(void)
6193{
6194}
6195void helper_invlpga(int aflag)
6196{
6197}
6198void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6199{
6200}
6201void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6202{
6203}
6204
6205void helper_svm_check_io(uint32_t port, uint32_t param,
6206 uint32_t next_eip_addend)
6207{
6208}
6209#else
6210
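/* The VMCB keeps segment attributes in a packed 12-bit form: descriptor flag
   bits 8..15 (type, S, DPL, P) end up in attrib bits 0..7 and flag bits
   20..23 (AVL, L, D/B, G) in attrib bits 8..11; svm_load_seg undoes this. */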
6211static inline void svm_save_seg(target_phys_addr_t addr,
6212 const SegmentCache *sc)
6213{
6214 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6215 sc->selector);
6216 stq_phys(addr + offsetof(struct vmcb_seg, base),
6217 sc->base);
6218 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6219 sc->limit);
6220 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6221 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6222}
6223
6224static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6225{
6226 unsigned int flags;
6227
6228 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6229 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6230 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6231 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6232 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6233}
6234
6235static inline void svm_load_seg_cache(target_phys_addr_t addr,
6236 CPUState *env, int seg_reg)
6237{
6238 SegmentCache sc1, *sc = &sc1;
6239 svm_load_seg(addr, sc);
6240 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6241 sc->base, sc->limit, sc->flags);
6242}
6243
6244void helper_vmrun(int aflag, int next_eip_addend)
6245{
6246 target_ulong addr;
6247 uint32_t event_inj;
6248 uint32_t int_ctl;
6249
6250 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6251
6252 if (aflag == 2)
6253 addr = EAX;
6254 else
6255 addr = (uint32_t)EAX;
6256
6257 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6258
6259 env->vm_vmcb = addr;
6260
6261 /* save the current CPU state in the hsave page */
6262 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6263 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6264
6265 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6266 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6267
6268 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6269 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6270 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6271 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6272 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6273 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6274
6275 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6276 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6277
6278 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6279 &env->segs[R_ES]);
6280 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6281 &env->segs[R_CS]);
6282 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6283 &env->segs[R_SS]);
6284 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6285 &env->segs[R_DS]);
6286
6287 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6288 EIP + next_eip_addend);
6289 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6290 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6291
6292 /* load the interception bitmaps so we do not need to access the
6293 vmcb in svm mode */
6294 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6295 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6296 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6297 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6298 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6299 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6300
6301 /* enable intercepts */
6302 env->hflags |= HF_SVMI_MASK;
6303
6304 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6305
6306 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6307 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6308
6309 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6310 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6311
6312 /* clear exit_info_2 so we behave like the real hardware */
6313 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6314
6315 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6316 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6317 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6318 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6319 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6320 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6321 if (int_ctl & V_INTR_MASKING_MASK) {
6322 env->v_tpr = int_ctl & V_TPR_MASK;
6323 env->hflags2 |= HF2_VINTR_MASK;
6324 if (env->eflags & IF_MASK)
6325 env->hflags2 |= HF2_HIF_MASK;
6326 }
6327
6328 cpu_load_efer(env,
6329 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6330 env->eflags = 0;
6331 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6332 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6333 CC_OP = CC_OP_EFLAGS;
6334
6335 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6336 env, R_ES);
6337 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6338 env, R_CS);
6339 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6340 env, R_SS);
6341 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6342 env, R_DS);
6343
6344 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6345 env->eip = EIP;
6346 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6347 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6348 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6349 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6350 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6351
6352 /* FIXME: guest state consistency checks */
6353
6354 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6355 case TLB_CONTROL_DO_NOTHING:
6356 break;
6357 case TLB_CONTROL_FLUSH_ALL_ASID:
6358 /* FIXME: this is not 100% correct but should work for now */
6359 tlb_flush(env, 1);
6360 break;
6361 }
6362
6363 env->hflags2 |= HF2_GIF_MASK;
6364
6365 if (int_ctl & V_IRQ_MASK) {
6366 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6367 }
6368
6369 /* maybe we need to inject an event */
6370 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6371 if (event_inj & SVM_EVTINJ_VALID) {
6372 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6373 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6374 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6375
6376 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6377 /* FIXME: need to implement valid_err */
6378 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6379 case SVM_EVTINJ_TYPE_INTR:
6380 env->exception_index = vector;
6381 env->error_code = event_inj_err;
6382 env->exception_is_int = 0;
6383 env->exception_next_eip = -1;
6384 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6385 /* XXX: is it always correct ? */
6386 do_interrupt(vector, 0, 0, 0, 1);
6387 break;
6388 case SVM_EVTINJ_TYPE_NMI:
6389 env->exception_index = EXCP02_NMI;
6390 env->error_code = event_inj_err;
6391 env->exception_is_int = 0;
6392 env->exception_next_eip = EIP;
6393 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6394 cpu_loop_exit();
6395 break;
6396 case SVM_EVTINJ_TYPE_EXEPT:
6397 env->exception_index = vector;
6398 env->error_code = event_inj_err;
6399 env->exception_is_int = 0;
6400 env->exception_next_eip = -1;
6401 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6402 cpu_loop_exit();
6403 break;
6404 case SVM_EVTINJ_TYPE_SOFT:
6405 env->exception_index = vector;
6406 env->error_code = event_inj_err;
6407 env->exception_is_int = 1;
6408 env->exception_next_eip = EIP;
6409 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6410 cpu_loop_exit();
6411 break;
6412 }
6413 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6414 }
6415}
6416
6417void helper_vmmcall(void)
6418{
6419 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6420 raise_exception(EXCP06_ILLOP);
6421}
6422
6423void helper_vmload(int aflag)
6424{
6425 target_ulong addr;
6426 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6427
6428 if (aflag == 2)
6429 addr = EAX;
6430 else
6431 addr = (uint32_t)EAX;
6432
6433 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6434 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6435 env->segs[R_FS].base);
6436
6437 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6438 env, R_FS);
6439 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6440 env, R_GS);
6441 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6442 &env->tr);
6443 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6444 &env->ldt);
6445
6446#ifdef TARGET_X86_64
6447 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6448 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6449 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6450 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6451#endif
6452 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6453 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6454 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6455 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6456}
6457
6458void helper_vmsave(int aflag)
6459{
6460 target_ulong addr;
6461 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6462
6463 if (aflag == 2)
6464 addr = EAX;
6465 else
6466 addr = (uint32_t)EAX;
6467
6468 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6469 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6470 env->segs[R_FS].base);
6471
6472 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6473 &env->segs[R_FS]);
6474 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6475 &env->segs[R_GS]);
6476 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6477 &env->tr);
6478 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6479 &env->ldt);
6480
6481#ifdef TARGET_X86_64
6482 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6483 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6484 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6485 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6486#endif
6487 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6488 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6489 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6490 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6491}
6492
6493void helper_stgi(void)
6494{
6495 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6496 env->hflags2 |= HF2_GIF_MASK;
6497}
6498
6499void helper_clgi(void)
6500{
6501 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6502 env->hflags2 &= ~HF2_GIF_MASK;
6503}
6504
6505void helper_skinit(void)
6506{
6507 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6508 /* XXX: not implemented */
6509 raise_exception(EXCP06_ILLOP);
6510}
6511
6512void helper_invlpga(int aflag)
6513{
6514 target_ulong addr;
6515 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6516
6517 if (aflag == 2)
6518 addr = EAX;
6519 else
6520 addr = (uint32_t)EAX;
6521
6522    /* XXX: could use the ASID to see whether the flush is
6523       actually needed */
6524 tlb_flush_page(env, addr);
6525}
6526
6527void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6528{
6529 if (likely(!(env->hflags & HF_SVMI_MASK)))
6530 return;
6531#ifndef VBOX
6532 switch(type) {
6533 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6534 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6535 helper_vmexit(type, param);
6536 }
6537 break;
6538 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6539 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6540 helper_vmexit(type, param);
6541 }
6542 break;
6543 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6544 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6545 helper_vmexit(type, param);
6546 }
6547 break;
6548 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6549 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6550 helper_vmexit(type, param);
6551 }
6552 break;
6553 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6554 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6555 helper_vmexit(type, param);
6556 }
6557 break;
6558 case SVM_EXIT_MSR:
6559 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6560 /* FIXME: this should be read in at vmrun (faster this way?) */
6561 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6562 uint32_t t0, t1;
6563 switch((uint32_t)ECX) {
6564 case 0 ... 0x1fff:
6565 t0 = (ECX * 2) % 8;
6566                t1 = (ECX * 2) / 8;  /* two bits per MSR in the permission map, so the byte index uses the doubled offset like the ranges below */
6567 break;
6568 case 0xc0000000 ... 0xc0001fff:
6569 t0 = (8192 + ECX - 0xc0000000) * 2;
6570 t1 = (t0 / 8);
6571 t0 %= 8;
6572 break;
6573 case 0xc0010000 ... 0xc0011fff:
6574 t0 = (16384 + ECX - 0xc0010000) * 2;
6575 t1 = (t0 / 8);
6576 t0 %= 8;
6577 break;
6578 default:
6579 helper_vmexit(type, param);
6580 t0 = 0;
6581 t1 = 0;
6582 break;
6583 }
6584 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6585 helper_vmexit(type, param);
6586 }
6587 break;
6588 default:
6589 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6590 helper_vmexit(type, param);
6591 }
6592 break;
6593 }
6594#else /* VBOX */
6595 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6596#endif /* VBOX */
6597}
6598
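/* IOIO intercept check: the I/O permission map holds one bit per port. In the
   pre-computed exit_info_1 value ("param"), bits 4..6 are the SZ8/SZ16/SZ32 flags,
   so (param >> 4) & 7 reads as the access size in bytes and "mask" covers one bit
   per byte touched starting at "port". */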
6599void helper_svm_check_io(uint32_t port, uint32_t param,
6600 uint32_t next_eip_addend)
6601{
6602 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6603 /* FIXME: this should be read in at vmrun (faster this way?) */
6604 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6605 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6606 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6607 /* next EIP */
6608 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6609 env->eip + next_eip_addend);
6610 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6611 }
6612 }
6613}
6614
6615/* Note: currently only 32 bits of exit_code are used */
6616void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6617{
6618 uint32_t int_ctl;
6619
6620 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6621 exit_code, exit_info_1,
6622 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6623 EIP);
6624
6625 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6626 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6627 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6628 } else {
6629 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6630 }
6631
6632 /* Save the VM state in the vmcb */
6633 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6634 &env->segs[R_ES]);
6635 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6636 &env->segs[R_CS]);
6637 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6638 &env->segs[R_SS]);
6639 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6640 &env->segs[R_DS]);
6641
6642 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6643 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6644
6645 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6646 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6647
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6649 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6650 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6652 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6653
6654 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6655 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6656 int_ctl |= env->v_tpr & V_TPR_MASK;
6657 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6658 int_ctl |= V_IRQ_MASK;
6659 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6660
6661 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6662 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6663 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6664 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6665 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6666 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6667 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6668
6669 /* Reload the host state from vm_hsave */
6670 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6671 env->hflags &= ~HF_SVMI_MASK;
6672 env->intercept = 0;
6673 env->intercept_exceptions = 0;
6674 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6675 env->tsc_offset = 0;
6676
6677 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6678 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6679
6680 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6681 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6682
6683 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6684 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6685 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6686 /* we need to set the efer after the crs so the hidden flags get
6687 set properly */
6688 cpu_load_efer(env,
6689 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6690 env->eflags = 0;
6691 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6692 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6693 CC_OP = CC_OP_EFLAGS;
6694
6695 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6696 env, R_ES);
6697 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6698 env, R_CS);
6699 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6700 env, R_SS);
6701 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6702 env, R_DS);
6703
6704 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6705 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6706 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6707
6708 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6709 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6710
6711 /* other setups */
6712 cpu_x86_set_cpl(env, 0);
6713 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6714 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6715
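    /* Report the event that was queued for injection back to the host via
       exit_int_info/exit_int_info_err, then clear the injection field. */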
6716 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6717 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6718 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6719 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6720 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6721
6722 env->hflags2 &= ~HF2_GIF_MASK;
6723 /* FIXME: Resets the current ASID register to zero (host ASID). */
6724
6725 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6726
6727 /* Clears the TSC_OFFSET inside the processor. */
6728
6729 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6730       from the page table indicated by the host's CR3. If the PDPEs contain
6731 illegal state, the processor causes a shutdown. */
6732
6733 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6734 env->cr[0] |= CR0_PE_MASK;
6735 env->eflags &= ~VM_MASK;
6736
6737 /* Disables all breakpoints in the host DR7 register. */
6738
6739 /* Checks the reloaded host state for consistency. */
6740
6741 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6742 host's code segment or non-canonical (in the case of long mode), a
6743       #GP fault is delivered inside the host. */
6744
6745 /* remove any pending exception */
6746 env->exception_index = -1;
6747 env->error_code = 0;
6748 env->old_exception = -1;
6749
6750 cpu_loop_exit();
6751}
6752
6753#endif
6754
6755/* MMX/SSE */
6756/* XXX: optimize by storing fptt and fptags in the static cpu state */
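/* env->fptags[] keeps one byte per x87 register, non-zero meaning "empty": any MMX
   instruction resets TOP and marks all eight registers in use (enter_mmx), while EMMS
   marks them all empty again, which is what the 0x01010101 stores do. */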
6757void helper_enter_mmx(void)
6758{
6759 env->fpstt = 0;
6760 *(uint32_t *)(env->fptags) = 0;
6761 *(uint32_t *)(env->fptags + 4) = 0;
6762}
6763
6764void helper_emms(void)
6765{
6766 /* set to empty state */
6767 *(uint32_t *)(env->fptags) = 0x01010101;
6768 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6769}
6770
6771/* XXX: suppress */
6772void helper_movq(void *d, void *s)
6773{
6774 *(uint64_t *)d = *(uint64_t *)s;
6775}
6776
6777#define SHIFT 0
6778#include "ops_sse.h"
6779
6780#define SHIFT 1
6781#include "ops_sse.h"
6782
6783#define SHIFT 0
6784#include "helper_template.h"
6785#undef SHIFT
6786
6787#define SHIFT 1
6788#include "helper_template.h"
6789#undef SHIFT
6790
6791#define SHIFT 2
6792#include "helper_template.h"
6793#undef SHIFT
6794
6795#ifdef TARGET_X86_64
6796
6797#define SHIFT 3
6798#include "helper_template.h"
6799#undef SHIFT
6800
6801#endif
6802
6803/* bit operations */
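/* helper_bsf returns the index of the least significant set bit, e.g. bsf(0x18) == 3.
   helper_lzcnt scans from the top: lzcnt(0x0008, 16) == 12; with wordsize == 0 it
   instead returns the index of the most significant set bit, which is how helper_bsr
   uses it. helper_bsf and the wordsize == 0 path of helper_lzcnt assume a non-zero
   input. */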
6804target_ulong helper_bsf(target_ulong t0)
6805{
6806 int count;
6807 target_ulong res;
6808
6809 res = t0;
6810 count = 0;
6811 while ((res & 1) == 0) {
6812 count++;
6813 res >>= 1;
6814 }
6815 return count;
6816}
6817
6818target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6819{
6820 int count;
6821 target_ulong res, mask;
6822
6823 if (wordsize > 0 && t0 == 0) {
6824 return wordsize;
6825 }
6826 res = t0;
6827 count = TARGET_LONG_BITS - 1;
6828 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6829 while ((res & mask) == 0) {
6830 count--;
6831 res <<= 1;
6832 }
6833 if (wordsize > 0) {
6834 return wordsize - 1 - count;
6835 }
6836 return count;
6837}
6838
6839target_ulong helper_bsr(target_ulong t0)
6840{
6841 return helper_lzcnt(t0, 0);
6842}
6843
6844static int compute_all_eflags(void)
6845{
6846 return CC_SRC;
6847}
6848
6849static int compute_c_eflags(void)
6850{
6851 return CC_SRC & CC_C;
6852}
6853
6854uint32_t helper_cc_compute_all(int op)
6855{
6856 switch (op) {
6857 default: /* should never happen */ return 0;
6858
6859 case CC_OP_EFLAGS: return compute_all_eflags();
6860
6861 case CC_OP_MULB: return compute_all_mulb();
6862 case CC_OP_MULW: return compute_all_mulw();
6863 case CC_OP_MULL: return compute_all_mull();
6864
6865 case CC_OP_ADDB: return compute_all_addb();
6866 case CC_OP_ADDW: return compute_all_addw();
6867 case CC_OP_ADDL: return compute_all_addl();
6868
6869 case CC_OP_ADCB: return compute_all_adcb();
6870 case CC_OP_ADCW: return compute_all_adcw();
6871 case CC_OP_ADCL: return compute_all_adcl();
6872
6873 case CC_OP_SUBB: return compute_all_subb();
6874 case CC_OP_SUBW: return compute_all_subw();
6875 case CC_OP_SUBL: return compute_all_subl();
6876
6877 case CC_OP_SBBB: return compute_all_sbbb();
6878 case CC_OP_SBBW: return compute_all_sbbw();
6879 case CC_OP_SBBL: return compute_all_sbbl();
6880
6881 case CC_OP_LOGICB: return compute_all_logicb();
6882 case CC_OP_LOGICW: return compute_all_logicw();
6883 case CC_OP_LOGICL: return compute_all_logicl();
6884
6885 case CC_OP_INCB: return compute_all_incb();
6886 case CC_OP_INCW: return compute_all_incw();
6887 case CC_OP_INCL: return compute_all_incl();
6888
6889 case CC_OP_DECB: return compute_all_decb();
6890 case CC_OP_DECW: return compute_all_decw();
6891 case CC_OP_DECL: return compute_all_decl();
6892
6893 case CC_OP_SHLB: return compute_all_shlb();
6894 case CC_OP_SHLW: return compute_all_shlw();
6895 case CC_OP_SHLL: return compute_all_shll();
6896
6897 case CC_OP_SARB: return compute_all_sarb();
6898 case CC_OP_SARW: return compute_all_sarw();
6899 case CC_OP_SARL: return compute_all_sarl();
6900
6901#ifdef TARGET_X86_64
6902 case CC_OP_MULQ: return compute_all_mulq();
6903
6904 case CC_OP_ADDQ: return compute_all_addq();
6905
6906 case CC_OP_ADCQ: return compute_all_adcq();
6907
6908 case CC_OP_SUBQ: return compute_all_subq();
6909
6910 case CC_OP_SBBQ: return compute_all_sbbq();
6911
6912 case CC_OP_LOGICQ: return compute_all_logicq();
6913
6914 case CC_OP_INCQ: return compute_all_incq();
6915
6916 case CC_OP_DECQ: return compute_all_decq();
6917
6918 case CC_OP_SHLQ: return compute_all_shlq();
6919
6920 case CC_OP_SARQ: return compute_all_sarq();
6921#endif
6922 }
6923}
6924
6925uint32_t helper_cc_compute_c(int op)
6926{
6927 switch (op) {
6928 default: /* should never happen */ return 0;
6929
6930 case CC_OP_EFLAGS: return compute_c_eflags();
6931
6932 case CC_OP_MULB: return compute_c_mull();
6933 case CC_OP_MULW: return compute_c_mull();
6934 case CC_OP_MULL: return compute_c_mull();
6935
6936 case CC_OP_ADDB: return compute_c_addb();
6937 case CC_OP_ADDW: return compute_c_addw();
6938 case CC_OP_ADDL: return compute_c_addl();
6939
6940 case CC_OP_ADCB: return compute_c_adcb();
6941 case CC_OP_ADCW: return compute_c_adcw();
6942 case CC_OP_ADCL: return compute_c_adcl();
6943
6944 case CC_OP_SUBB: return compute_c_subb();
6945 case CC_OP_SUBW: return compute_c_subw();
6946 case CC_OP_SUBL: return compute_c_subl();
6947
6948 case CC_OP_SBBB: return compute_c_sbbb();
6949 case CC_OP_SBBW: return compute_c_sbbw();
6950 case CC_OP_SBBL: return compute_c_sbbl();
6951
6952 case CC_OP_LOGICB: return compute_c_logicb();
6953 case CC_OP_LOGICW: return compute_c_logicw();
6954 case CC_OP_LOGICL: return compute_c_logicl();
6955
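    /* INC and DEC leave CF untouched, so the previously computed carry is simply
       carried along and every operand size dispatches to the same compute_c_incl. */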
6956 case CC_OP_INCB: return compute_c_incl();
6957 case CC_OP_INCW: return compute_c_incl();
6958 case CC_OP_INCL: return compute_c_incl();
6959
6960 case CC_OP_DECB: return compute_c_incl();
6961 case CC_OP_DECW: return compute_c_incl();
6962 case CC_OP_DECL: return compute_c_incl();
6963
6964 case CC_OP_SHLB: return compute_c_shlb();
6965 case CC_OP_SHLW: return compute_c_shlw();
6966 case CC_OP_SHLL: return compute_c_shll();
6967
6968 case CC_OP_SARB: return compute_c_sarl();
6969 case CC_OP_SARW: return compute_c_sarl();
6970 case CC_OP_SARL: return compute_c_sarl();
6971
6972#ifdef TARGET_X86_64
6973 case CC_OP_MULQ: return compute_c_mull();
6974
6975 case CC_OP_ADDQ: return compute_c_addq();
6976
6977 case CC_OP_ADCQ: return compute_c_adcq();
6978
6979 case CC_OP_SUBQ: return compute_c_subq();
6980
6981 case CC_OP_SBBQ: return compute_c_sbbq();
6982
6983 case CC_OP_LOGICQ: return compute_c_logicq();
6984
6985 case CC_OP_INCQ: return compute_c_incl();
6986
6987 case CC_OP_DECQ: return compute_c_incl();
6988
6989 case CC_OP_SHLQ: return compute_c_shlq();
6990
6991 case CC_OP_SARQ: return compute_c_sarl();
6992#endif
6993 }
6994}