VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 36061

Last change on this file since 36061 was 36061, checked in by vboxsync, 14 years ago

REM: More .remstep logging.

  • Property svn:eol-style set to native
File size: 195.0 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "host-utils.h"
33
34#ifdef VBOX
35#include "qemu-common.h"
36#include <math.h>
37#include "tcg.h"
38#endif
39//#define DEBUG_PCALL
40
41#if 0
42#define raise_exception_err(a, b)\
43do {\
44 if (logfile)\
45 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
46 (raise_exception_err)(a, b);\
47} while (0)
48#endif
49
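/* Lookup table for the x86 parity flag: entry i is CC_P when byte value i
   contains an even number of set bits, 0 otherwise.  The lazy condition-code
   helpers index it with the low 8 bits of a result to recover PF. */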
50const uint8_t parity_table[256] = {
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83};
84
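/* Rotate-through-carry count tables: RCL/RCR on a 16-bit operand rotates
   through 17 bit positions (16 data bits plus CF) and on an 8-bit operand
   through 9, so the shift count is reduced modulo 17 resp. 9 below. */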
85/* modulo 17 table */
86const uint8_t rclw_table[32] = {
87 0, 1, 2, 3, 4, 5, 6, 7,
88 8, 9,10,11,12,13,14,15,
89 16, 0, 1, 2, 3, 4, 5, 6,
90 7, 8, 9,10,11,12,13,14,
91};
92
93/* modulo 9 table */
94const uint8_t rclb_table[32] = {
95 0, 1, 2, 3, 4, 5, 6, 7,
96 8, 0, 1, 2, 3, 4, 5, 6,
97 7, 8, 0, 1, 2, 3, 4, 5,
98 6, 7, 8, 0, 1, 2, 3, 4,
99};
100
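/* Constants loaded by the x87 FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and
   FLDL2T instructions: 0.0, 1.0, pi, log10(2), ln(2), log2(e), log2(10). */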
101const CPU86_LDouble f15rk[7] =
102{
103 0.00000000000000000000L,
104 1.00000000000000000000L,
105 3.14159265358979323851L, /*pi*/
106 0.30102999566398119523L, /*lg2*/
107 0.69314718055994530943L, /*ln2*/
108 1.44269504088896340739L, /*l2e*/
109 3.32192809488736234781L, /*l2t*/
110};
111
112/* broken thread support */
113
114spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
115
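/* helper_lock()/helper_unlock() serialize LOCK-prefixed instructions by
   taking a single global spinlock around the locked operation (hence the
   "broken thread support" note above: it is one coarse, process-wide lock). */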
116void helper_lock(void)
117{
118 spin_lock(&global_cpu_lock);
119}
120
121void helper_unlock(void)
122{
123 spin_unlock(&global_cpu_lock);
124}
125
126void helper_write_eflags(target_ulong t0, uint32_t update_mask)
127{
128 load_eflags(t0, update_mask);
129}
130
131target_ulong helper_read_eflags(void)
132{
133 uint32_t eflags;
134 eflags = cc_table[CC_OP].compute_all();
135 eflags |= (DF & DF_MASK);
136 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
137 return eflags;
138}
139
140#ifdef VBOX
141void helper_write_eflags_vme(target_ulong t0)
142{
143 unsigned int new_eflags = t0;
144
145 assert(env->eflags & (1<<VM_SHIFT));
146
147 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
148 /* if TF will be set -> #GP */
149 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
150 || (new_eflags & TF_MASK)) {
151 raise_exception(EXCP0D_GPF);
152 } else {
153 load_eflags(new_eflags,
154 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
155
156 if (new_eflags & IF_MASK) {
157 env->eflags |= VIF_MASK;
158 } else {
159 env->eflags &= ~VIF_MASK;
160 }
161 }
162}
163
164target_ulong helper_read_eflags_vme(void)
165{
166 uint32_t eflags;
167 eflags = cc_table[CC_OP].compute_all();
168 eflags |= (DF & DF_MASK);
169 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
170 if (env->eflags & VIF_MASK)
171 eflags |= IF_MASK;
172 else
173 eflags &= ~IF_MASK;
174
175 /* According to AMD manual, should be read with IOPL == 3 */
176 eflags |= (3 << IOPL_SHIFT);
177
178 /* We only use helper_read_eflags_vme() in 16-bit mode */
179 return eflags & 0xffff;
180}
181
182void helper_dump_state()
183{
184 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
185 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
186 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
187 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
188 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
189 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
190 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
191}
192#endif
193
194/* return non zero if error */
195#ifndef VBOX
196static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
197#else /* VBOX */
198DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#endif /* VBOX */
200 int selector)
201{
202 SegmentCache *dt;
203 int index;
204 target_ulong ptr;
205
206#ifdef VBOX
207 /* Trying to load a selector with CPL=1? */
208 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
209 {
210 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
211 selector = selector & 0xfffc;
212 }
213#endif
214
215 if (selector & 0x4)
216 dt = &env->ldt;
217 else
218 dt = &env->gdt;
219 index = selector & ~7;
220 if ((index + 7) > dt->limit)
221 return -1;
222 ptr = dt->base + index;
223 *e1_ptr = ldl_kernel(ptr);
224 *e2_ptr = ldl_kernel(ptr + 4);
225 return 0;
226}
227
228#ifndef VBOX
229static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
230#else /* VBOX */
231DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
232#endif /* VBOX */
233{
234 unsigned int limit;
235 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
236 if (e2 & DESC_G_MASK)
237 limit = (limit << 12) | 0xfff;
238 return limit;
239}
240
241#ifndef VBOX
242static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
243#else /* VBOX */
244DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
245#endif /* VBOX */
246{
247 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
248}
249
250#ifndef VBOX
251static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
252#else /* VBOX */
253DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#endif /* VBOX */
255{
256 sc->base = get_seg_base(e1, e2);
257 sc->limit = get_seg_limit(e1, e2);
258 sc->flags = e2;
259}
260
261/* init the segment cache in vm86 mode. */
262#ifndef VBOX
263static inline void load_seg_vm(int seg, int selector)
264#else /* VBOX */
265DECLINLINE(void) load_seg_vm(int seg, int selector)
266#endif /* VBOX */
267{
268 selector &= 0xffff;
269#ifdef VBOX
270 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
271 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
272 flags |= (3 << DESC_DPL_SHIFT);
273
274 cpu_x86_load_seg_cache(env, seg, selector,
275 (selector << 4), 0xffff, flags);
276#else
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, 0);
279#endif
280}
281
282#ifndef VBOX
283static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
284#else /* VBOX */
285DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
286#endif /* VBOX */
287 uint32_t *esp_ptr, int dpl)
288{
289#ifndef VBOX
290 int type, index, shift;
291#else
292 unsigned int type, index, shift;
293#endif
294
295#if 0
296 {
297 int i;
298 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
299 for(i=0;i<env->tr.limit;i++) {
300 printf("%02x ", env->tr.base[i]);
301 if ((i & 7) == 7) printf("\n");
302 }
303 printf("\n");
304 }
305#endif
306
307 if (!(env->tr.flags & DESC_P_MASK))
308 cpu_abort(env, "invalid tss");
309 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
310 if ((type & 7) != 1)
311 cpu_abort(env, "invalid tss type");
312 shift = type >> 3;
313 index = (dpl * 4 + 2) << shift;
314 if (index + (4 << shift) - 1 > env->tr.limit)
315 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
316 if (shift == 0) {
317 *esp_ptr = lduw_kernel(env->tr.base + index);
318 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
319 } else {
320 *esp_ptr = ldl_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
322 }
323}
324
325/* XXX: merge with load_seg() */
326static void tss_load_seg(int seg_reg, int selector)
327{
328 uint32_t e1, e2;
329 int rpl, dpl, cpl;
330
331#ifdef VBOX
332 e1 = e2 = 0;
333 cpl = env->hflags & HF_CPL_MASK;
334 /* Trying to load a selector with CPL=1? */
335 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
336 {
337 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
338 selector = selector & 0xfffc;
339 }
340#endif
341
342 if ((selector & 0xfffc) != 0) {
343 if (load_segment(&e1, &e2, selector) != 0)
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 if (!(e2 & DESC_S_MASK))
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 rpl = selector & 3;
348 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
349 cpl = env->hflags & HF_CPL_MASK;
350 if (seg_reg == R_CS) {
351 if (!(e2 & DESC_CS_MASK))
352 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
353 /* XXX: is it correct ? */
354 if (dpl != rpl)
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 if ((e2 & DESC_C_MASK) && dpl > rpl)
357 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
358 } else if (seg_reg == R_SS) {
359 /* SS must be writable data */
360 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
361 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
362 if (dpl != cpl || dpl != rpl)
363 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
364 } else {
365 /* not readable code */
366 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
367 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
368 /* if data or non-conforming code, check the rights */
369 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
370 if (dpl < cpl || dpl < rpl)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 }
373 }
374 if (!(e2 & DESC_P_MASK))
375 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
376 cpu_x86_load_seg_cache(env, seg_reg, selector,
377 get_seg_base(e1, e2),
378 get_seg_limit(e1, e2),
379 e2);
380 } else {
381 if (seg_reg == R_SS || seg_reg == R_CS)
382 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
383#ifdef VBOX
384#if 0
385 /** @todo For now we ignore loading null (0) selectors; need to check once what the correct behaviour is. */
386 cpu_x86_load_seg_cache(env, seg_reg, selector,
387 0, 0, 0);
388#endif
389#endif
390 }
391}
392
393#define SWITCH_TSS_JMP 0
394#define SWITCH_TSS_IRET 1
395#define SWITCH_TSS_CALL 2
396
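/* Emulate a hardware task switch: validate the new TSS descriptor, save the
   current register state into the old TSS, update the busy bits, load
   EIP/EFLAGS/general registers and the LDT from the new TSS, and finally
   reload the segment registers with full privilege checks. */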
397/* XXX: restore CPU state in registers (PowerPC case) */
398static void switch_tss(int tss_selector,
399 uint32_t e1, uint32_t e2, int source,
400 uint32_t next_eip)
401{
402 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
403 target_ulong tss_base;
404 uint32_t new_regs[8], new_segs[6];
405 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
406 uint32_t old_eflags, eflags_mask;
407 SegmentCache *dt;
408#ifndef VBOX
409 int index;
410#else
411 unsigned int index;
412#endif
413 target_ulong ptr;
414
415 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
416#ifdef DEBUG_PCALL
417 if (loglevel & CPU_LOG_PCALL)
418 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
419#endif
420
421#if defined(VBOX) && defined(DEBUG)
422 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
423#endif
424
425 /* if task gate, we read the TSS segment and we load it */
426 if (type == 5) {
427 if (!(e2 & DESC_P_MASK))
428 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
429 tss_selector = e1 >> 16;
430 if (tss_selector & 4)
431 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
432 if (load_segment(&e1, &e2, tss_selector) != 0)
433 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
434 if (e2 & DESC_S_MASK)
435 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
436 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
437 if ((type & 7) != 1)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 }
440
441 if (!(e2 & DESC_P_MASK))
442 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
443
444 if (type & 8)
445 tss_limit_max = 103;
446 else
447 tss_limit_max = 43;
448 tss_limit = get_seg_limit(e1, e2);
449 tss_base = get_seg_base(e1, e2);
450 if ((tss_selector & 4) != 0 ||
451 tss_limit < tss_limit_max)
452 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
453 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
454 if (old_type & 8)
455 old_tss_limit_max = 103;
456 else
457 old_tss_limit_max = 43;
458
459 /* read all the registers from the new TSS */
460 if (type & 8) {
461 /* 32 bit */
462 new_cr3 = ldl_kernel(tss_base + 0x1c);
463 new_eip = ldl_kernel(tss_base + 0x20);
464 new_eflags = ldl_kernel(tss_base + 0x24);
465 for(i = 0; i < 8; i++)
466 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
467 for(i = 0; i < 6; i++)
468 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
469 new_ldt = lduw_kernel(tss_base + 0x60);
470 new_trap = ldl_kernel(tss_base + 0x64);
471 } else {
472 /* 16 bit */
473 new_cr3 = 0;
474 new_eip = lduw_kernel(tss_base + 0x0e);
475 new_eflags = lduw_kernel(tss_base + 0x10);
476 for(i = 0; i < 8; i++)
477 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
478 for(i = 0; i < 4; i++)
479 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
480 new_ldt = lduw_kernel(tss_base + 0x2a);
481 new_segs[R_FS] = 0;
482 new_segs[R_GS] = 0;
483 new_trap = 0;
484 }
485
486 /* NOTE: we must avoid memory exceptions during the task switch,
487 so we make dummy accesses before */
488 /* XXX: it can still fail in some cases, so a bigger hack is
489 necessary to validate the TLB after having done the accesses */
490
491 v1 = ldub_kernel(env->tr.base);
492 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
493 stb_kernel(env->tr.base, v1);
494 stb_kernel(env->tr.base + old_tss_limit_max, v2);
495
496 /* clear busy bit (it is restartable) */
497 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
498 target_ulong ptr;
499 uint32_t e2;
500 ptr = env->gdt.base + (env->tr.selector & ~7);
501 e2 = ldl_kernel(ptr + 4);
502 e2 &= ~DESC_TSS_BUSY_MASK;
503 stl_kernel(ptr + 4, e2);
504 }
505 old_eflags = compute_eflags();
506 if (source == SWITCH_TSS_IRET)
507 old_eflags &= ~NT_MASK;
508
509 /* save the current state in the old TSS */
510 if (type & 8) {
511 /* 32 bit */
512 stl_kernel(env->tr.base + 0x20, next_eip);
513 stl_kernel(env->tr.base + 0x24, old_eflags);
514 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
515 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
516 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
517 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
518 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
519 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
520 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
521 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
522 for(i = 0; i < 6; i++)
523 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
524#ifdef VBOX
525 /* Must store the ldt as it gets reloaded and might have been changed. */
526 stw_kernel(env->tr.base + 0x60, env->ldt.selector);
527#endif
528#if defined(VBOX) && defined(DEBUG)
529 printf("TSS 32 bits switch\n");
530 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
531#endif
532 } else {
533 /* 16 bit */
534 stw_kernel(env->tr.base + 0x0e, next_eip);
535 stw_kernel(env->tr.base + 0x10, old_eflags);
536 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
537 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
538 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
539 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
540 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
541 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
542 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
543 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
544 for(i = 0; i < 4; i++)
545 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
546#ifdef VBOX
547 /* Must store the ldt as it gets reloaded and might have been changed. */
548 stw_kernel(env->tr.base + 0x2a, env->ldt.selector);
549#endif
550 }
551
552 /* now if an exception occurs, it will occur in the next task
553 context */
554
555 if (source == SWITCH_TSS_CALL) {
556 stw_kernel(tss_base, env->tr.selector);
557 new_eflags |= NT_MASK;
558 }
559
560 /* set busy bit */
561 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
562 target_ulong ptr;
563 uint32_t e2;
564 ptr = env->gdt.base + (tss_selector & ~7);
565 e2 = ldl_kernel(ptr + 4);
566 e2 |= DESC_TSS_BUSY_MASK;
567 stl_kernel(ptr + 4, e2);
568 }
569
570 /* set the new CPU state */
571 /* from this point, any exception which occurs can give problems */
572 env->cr[0] |= CR0_TS_MASK;
573 env->hflags |= HF_TS_MASK;
574 env->tr.selector = tss_selector;
575 env->tr.base = tss_base;
576 env->tr.limit = tss_limit;
577 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
578
579 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
580 cpu_x86_update_cr3(env, new_cr3);
581 }
582
583 /* load all registers without an exception, then reload them with
584 possible exception */
585 env->eip = new_eip;
586 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
587 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
588 if (!(type & 8))
589 eflags_mask &= 0xffff;
590 load_eflags(new_eflags, eflags_mask);
591 /* XXX: what to do in 16 bit case ? */
592 EAX = new_regs[0];
593 ECX = new_regs[1];
594 EDX = new_regs[2];
595 EBX = new_regs[3];
596 ESP = new_regs[4];
597 EBP = new_regs[5];
598 ESI = new_regs[6];
599 EDI = new_regs[7];
600 if (new_eflags & VM_MASK) {
601 for(i = 0; i < 6; i++)
602 load_seg_vm(i, new_segs[i]);
603 /* in vm86, CPL is always 3 */
604 cpu_x86_set_cpl(env, 3);
605 } else {
606 /* CPL is set to the RPL of CS */
607 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
608 /* first just selectors as the rest may trigger exceptions */
609 for(i = 0; i < 6; i++)
610 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
611 }
612
613 env->ldt.selector = new_ldt & ~4;
614 env->ldt.base = 0;
615 env->ldt.limit = 0;
616 env->ldt.flags = 0;
617
618 /* load the LDT */
619 if (new_ldt & 4)
620 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
621
622 if ((new_ldt & 0xfffc) != 0) {
623 dt = &env->gdt;
624 index = new_ldt & ~7;
625 if ((index + 7) > dt->limit)
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 ptr = dt->base + index;
628 e1 = ldl_kernel(ptr);
629 e2 = ldl_kernel(ptr + 4);
630 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
631 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
632 if (!(e2 & DESC_P_MASK))
633 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
634 load_seg_cache_raw_dt(&env->ldt, e1, e2);
635 }
636
637 /* load the segments */
638 if (!(new_eflags & VM_MASK)) {
639 tss_load_seg(R_CS, new_segs[R_CS]);
640 tss_load_seg(R_SS, new_segs[R_SS]);
641 tss_load_seg(R_ES, new_segs[R_ES]);
642 tss_load_seg(R_DS, new_segs[R_DS]);
643 tss_load_seg(R_FS, new_segs[R_FS]);
644 tss_load_seg(R_GS, new_segs[R_GS]);
645 }
646
647 /* check that EIP is in the CS segment limits */
648 if (new_eip > env->segs[R_CS].limit) {
649 /* XXX: different exception if CALL ? */
650 raise_exception_err(EXCP0D_GPF, 0);
651 }
652}
653
654/* check if Port I/O is allowed in TSS */
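/* The I/O permission bitmap starts at the 16-bit offset stored at byte 0x66
   of the TSS; each port maps to one bit and the access is allowed only if
   every bit covering addr..addr+size-1 is clear. */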
655#ifndef VBOX
656static inline void check_io(int addr, int size)
657{
658 int io_offset, val, mask;
659
660#else /* VBOX */
661DECLINLINE(void) check_io(int addr, int size)
662{
663 int val, mask;
664 unsigned int io_offset;
665#endif /* VBOX */
666 /* TSS must be a valid 32 bit one */
667 if (!(env->tr.flags & DESC_P_MASK) ||
668 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
669 env->tr.limit < 103)
670 goto fail;
671 io_offset = lduw_kernel(env->tr.base + 0x66);
672 io_offset += (addr >> 3);
673 /* Note: the check needs two bytes */
674 if ((io_offset + 1) > env->tr.limit)
675 goto fail;
676 val = lduw_kernel(env->tr.base + io_offset);
677 val >>= (addr & 7);
678 mask = (1 << size) - 1;
679 /* all bits must be zero to allow the I/O */
680 if ((val & mask) != 0) {
681 fail:
682 raise_exception_err(EXCP0D_GPF, 0);
683 }
684}
685
686#ifdef VBOX
687/* Keep in sync with gen_check_external_event() */
688void helper_check_external_event()
689{
690 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
691 | CPU_INTERRUPT_EXTERNAL_TIMER
692 | CPU_INTERRUPT_EXTERNAL_DMA))
693 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
694 && (env->eflags & IF_MASK)
695 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
696 {
697 helper_external_event();
698 }
699
700}
701
702void helper_sync_seg(uint32_t reg)
703{
704 if (env->segs[reg].newselector)
705 sync_seg(env, reg, env->segs[reg].newselector);
706}
707#endif
708
709void helper_check_iob(uint32_t t0)
710{
711 check_io(t0, 1);
712}
713
714void helper_check_iow(uint32_t t0)
715{
716 check_io(t0, 2);
717}
718
719void helper_check_iol(uint32_t t0)
720{
721 check_io(t0, 4);
722}
723
724void helper_outb(uint32_t port, uint32_t data)
725{
726 cpu_outb(env, port, data & 0xff);
727}
728
729target_ulong helper_inb(uint32_t port)
730{
731 return cpu_inb(env, port);
732}
733
734void helper_outw(uint32_t port, uint32_t data)
735{
736 cpu_outw(env, port, data & 0xffff);
737}
738
739target_ulong helper_inw(uint32_t port)
740{
741 return cpu_inw(env, port);
742}
743
744void helper_outl(uint32_t port, uint32_t data)
745{
746 cpu_outl(env, port, data);
747}
748
749target_ulong helper_inl(uint32_t port)
750{
751 return cpu_inl(env, port);
752}
753
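/* The B bit of the SS descriptor selects a 32-bit (ESP) or 16-bit (SP)
   stack; the returned mask is applied to every stack pointer update. */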
754#ifndef VBOX
755static inline unsigned int get_sp_mask(unsigned int e2)
756#else /* VBOX */
757DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
758#endif /* VBOX */
759{
760 if (e2 & DESC_B_MASK)
761 return 0xffffffff;
762 else
763 return 0xffff;
764}
765
766#ifdef TARGET_X86_64
767#define SET_ESP(val, sp_mask)\
768do {\
769 if ((sp_mask) == 0xffff)\
770 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
771 else if ((sp_mask) == 0xffffffffLL)\
772 ESP = (uint32_t)(val);\
773 else\
774 ESP = (val);\
775} while (0)
776#else
777#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
778#endif
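/* SET_ESP only updates the part of ESP selected by sp_mask, so a 16-bit
   stack segment leaves the upper half of ESP untouched. */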
779
780/* in 64-bit machines, this can overflow. So this segment addition macro
781 * can be used to trim the value to 32-bit whenever needed */
782#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
783
784/* XXX: add a is_user flag to have proper security support */
785#define PUSHW(ssp, sp, sp_mask, val)\
786{\
787 sp -= 2;\
788 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
789}
790
791#define PUSHL(ssp, sp, sp_mask, val)\
792{\
793 sp -= 4;\
794 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
795}
796
797#define POPW(ssp, sp, sp_mask, val)\
798{\
799 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
800 sp += 2;\
801}
802
803#define POPL(ssp, sp, sp_mask, val)\
804{\
805 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
806 sp += 4;\
807}
808
809/* protected mode interrupt */
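/* Dispatch through the IDT: fetch and validate the gate descriptor,
   optionally perform a task switch (task gate) or a stack switch to the
   inner ring using SS:ESP from the TSS, push the old SS:ESP, EFLAGS,
   CS:EIP and error code, then load the new CS:EIP from the gate. */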
810static void do_interrupt_protected(int intno, int is_int, int error_code,
811 unsigned int next_eip, int is_hw)
812{
813 SegmentCache *dt;
814 target_ulong ptr, ssp;
815 int type, dpl, selector, ss_dpl, cpl;
816 int has_error_code, new_stack, shift;
817 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
818 uint32_t old_eip, sp_mask;
819
820#ifdef VBOX
821 ss = ss_e1 = ss_e2 = 0;
822 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
823 cpu_loop_exit();
824#endif
825
826 has_error_code = 0;
827 if (!is_int && !is_hw) {
828 switch(intno) {
829 case 8:
830 case 10:
831 case 11:
832 case 12:
833 case 13:
834 case 14:
835 case 17:
836 has_error_code = 1;
837 break;
838 }
839 }
840 if (is_int)
841 old_eip = next_eip;
842 else
843 old_eip = env->eip;
844
845 dt = &env->idt;
846#ifndef VBOX
847 if (intno * 8 + 7 > dt->limit)
848#else
849 if ((unsigned)intno * 8 + 7 > dt->limit)
850#endif
851 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
852 ptr = dt->base + intno * 8;
853 e1 = ldl_kernel(ptr);
854 e2 = ldl_kernel(ptr + 4);
855 /* check gate type */
856 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
857 switch(type) {
858 case 5: /* task gate */
859 /* must do that check here to return the correct error code */
860 if (!(e2 & DESC_P_MASK))
861 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
862 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
863 if (has_error_code) {
864 int type;
865 uint32_t mask;
866 /* push the error code */
867 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
868 shift = type >> 3;
869 if (env->segs[R_SS].flags & DESC_B_MASK)
870 mask = 0xffffffff;
871 else
872 mask = 0xffff;
873 esp = (ESP - (2 << shift)) & mask;
874 ssp = env->segs[R_SS].base + esp;
875 if (shift)
876 stl_kernel(ssp, error_code);
877 else
878 stw_kernel(ssp, error_code);
879 SET_ESP(esp, mask);
880 }
881 return;
882 case 6: /* 286 interrupt gate */
883 case 7: /* 286 trap gate */
884 case 14: /* 386 interrupt gate */
885 case 15: /* 386 trap gate */
886 break;
887 default:
888 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
889 break;
890 }
891 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
892 cpl = env->hflags & HF_CPL_MASK;
893 /* check privilege if software int */
894 if (is_int && dpl < cpl)
895 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
896 /* check valid bit */
897 if (!(e2 & DESC_P_MASK))
898 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
899 selector = e1 >> 16;
900 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
901 if ((selector & 0xfffc) == 0)
902 raise_exception_err(EXCP0D_GPF, 0);
903
904 if (load_segment(&e1, &e2, selector) != 0)
905 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
906 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
909 if (dpl > cpl)
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 if (!(e2 & DESC_P_MASK))
912 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
913 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
914 /* to inner privilege */
915 get_ss_esp_from_tss(&ss, &esp, dpl);
916 if ((ss & 0xfffc) == 0)
917 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
918 if ((ss & 3) != dpl)
919 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
920 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
921 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
922 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
923 if (ss_dpl != dpl)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 if (!(ss_e2 & DESC_S_MASK) ||
926 (ss_e2 & DESC_CS_MASK) ||
927 !(ss_e2 & DESC_W_MASK))
928 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
929 if (!(ss_e2 & DESC_P_MASK))
930#ifdef VBOX /* See page 3-477 of 253666.pdf */
931 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
932#else
933 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
934#endif
935 new_stack = 1;
936 sp_mask = get_sp_mask(ss_e2);
937 ssp = get_seg_base(ss_e1, ss_e2);
938#if defined(VBOX) && defined(DEBUG)
939 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
940#endif
941 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
942 /* to same privilege */
943 if (env->eflags & VM_MASK)
944 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
945 new_stack = 0;
946 sp_mask = get_sp_mask(env->segs[R_SS].flags);
947 ssp = env->segs[R_SS].base;
948 esp = ESP;
949 dpl = cpl;
950 } else {
951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
952 new_stack = 0; /* avoid warning */
953 sp_mask = 0; /* avoid warning */
954 ssp = 0; /* avoid warning */
955 esp = 0; /* avoid warning */
956 }
957
958 shift = type >> 3;
959
960#if 0
961 /* XXX: check that enough room is available */
962 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
963 if (env->eflags & VM_MASK)
964 push_size += 8;
965 push_size <<= shift;
966#endif
967 if (shift == 1) {
968 if (new_stack) {
969 if (env->eflags & VM_MASK) {
970 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
971 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
972 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
973 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
974 }
975 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
976 PUSHL(ssp, esp, sp_mask, ESP);
977 }
978 PUSHL(ssp, esp, sp_mask, compute_eflags());
979 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
980 PUSHL(ssp, esp, sp_mask, old_eip);
981 if (has_error_code) {
982 PUSHL(ssp, esp, sp_mask, error_code);
983 }
984 } else {
985 if (new_stack) {
986 if (env->eflags & VM_MASK) {
987 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
988 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
989 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
990 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
991 }
992 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
993 PUSHW(ssp, esp, sp_mask, ESP);
994 }
995 PUSHW(ssp, esp, sp_mask, compute_eflags());
996 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
997 PUSHW(ssp, esp, sp_mask, old_eip);
998 if (has_error_code) {
999 PUSHW(ssp, esp, sp_mask, error_code);
1000 }
1001 }
1002
1003 if (new_stack) {
1004 if (env->eflags & VM_MASK) {
1005 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1006 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1007 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1008 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1009 }
1010 ss = (ss & ~3) | dpl;
1011 cpu_x86_load_seg_cache(env, R_SS, ss,
1012 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1013 }
1014 SET_ESP(esp, sp_mask);
1015
1016 selector = (selector & ~3) | dpl;
1017 cpu_x86_load_seg_cache(env, R_CS, selector,
1018 get_seg_base(e1, e2),
1019 get_seg_limit(e1, e2),
1020 e2);
1021 cpu_x86_set_cpl(env, dpl);
1022 env->eip = offset;
1023
1024 /* interrupt gate clear IF mask */
1025 if ((type & 1) == 0) {
1026 env->eflags &= ~IF_MASK;
1027 }
1028#ifndef VBOX
1029 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1030#else
1031 /*
1032 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1033 * gets confused by seemingly changed EFLAGS. See #3491 and
1034 * public bug #2341.
1035 */
1036 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1037#endif
1038}
1039#ifdef VBOX
1040
1041/* check if VME interrupt redirection is enabled in TSS */
1042DECLINLINE(bool) is_vme_irq_redirected(int intno)
1043{
1044 unsigned int io_offset, intredir_offset;
1045 unsigned char val, mask;
1046
1047 /* TSS must be a valid 32 bit one */
1048 if (!(env->tr.flags & DESC_P_MASK) ||
1049 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1050 env->tr.limit < 103)
1051 goto fail;
1052 io_offset = lduw_kernel(env->tr.base + 0x66);
1053 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1054 if (io_offset < 0x68 + 0x20)
1055 io_offset = 0x68 + 0x20;
1056 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1057 intredir_offset = io_offset - 0x20;
1058
1059 intredir_offset += (intno >> 3);
1060 if ((intredir_offset) > env->tr.limit)
1061 goto fail;
1062
1063 val = ldub_kernel(env->tr.base + intredir_offset);
1064 mask = 1 << (unsigned char)(intno & 7);
1065
1066 /* bit set means no redirection. */
1067 if ((val & mask) != 0) {
1068 return false;
1069 }
1070 return true;
1071
1072fail:
1073 raise_exception_err(EXCP0D_GPF, 0);
1074 return true;
1075}
1076
1077/* V86 mode software interrupt with CR4.VME=1 */
1078static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1079{
1080 target_ulong ptr, ssp;
1081 int selector;
1082 uint32_t offset, esp;
1083 uint32_t old_cs, old_eflags;
1084 uint32_t iopl;
1085
1086 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1087
1088 if (!is_vme_irq_redirected(intno))
1089 {
1090 if (iopl == 3)
1091 {
1092 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1093 return;
1094 }
1095 else
1096 raise_exception_err(EXCP0D_GPF, 0);
1097 }
1098
1099 /* virtual mode idt is at linear address 0 */
1100 ptr = 0 + intno * 4;
1101 offset = lduw_kernel(ptr);
1102 selector = lduw_kernel(ptr + 2);
1103 esp = ESP;
1104 ssp = env->segs[R_SS].base;
1105 old_cs = env->segs[R_CS].selector;
1106
1107 old_eflags = compute_eflags();
1108 if (iopl < 3)
1109 {
1110 /* copy VIF into IF and set IOPL to 3 */
1111 if (env->eflags & VIF_MASK)
1112 old_eflags |= IF_MASK;
1113 else
1114 old_eflags &= ~IF_MASK;
1115
1116 old_eflags |= (3 << IOPL_SHIFT);
1117 }
1118
1119 /* XXX: use SS segment size ? */
1120 PUSHW(ssp, esp, 0xffff, old_eflags);
1121 PUSHW(ssp, esp, 0xffff, old_cs);
1122 PUSHW(ssp, esp, 0xffff, next_eip);
1123
1124 /* update processor state */
1125 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1126 env->eip = offset;
1127 env->segs[R_CS].selector = selector;
1128 env->segs[R_CS].base = (selector << 4);
1129 env->eflags &= ~(TF_MASK | RF_MASK);
1130
1131 if (iopl < 3)
1132 env->eflags &= ~VIF_MASK;
1133 else
1134 env->eflags &= ~IF_MASK;
1135}
1136#endif /* VBOX */
1137
1138#ifdef TARGET_X86_64
1139
1140#define PUSHQ(sp, val)\
1141{\
1142 sp -= 8;\
1143 stq_kernel(sp, (val));\
1144}
1145
1146#define POPQ(sp, val)\
1147{\
1148 val = ldq_kernel(sp);\
1149 sp += 8;\
1150}
1151
1152#ifndef VBOX
1153static inline target_ulong get_rsp_from_tss(int level)
1154#else /* VBOX */
1155DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1156#endif /* VBOX */
1157{
1158 int index;
1159
1160#if 0
1161 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1162 env->tr.base, env->tr.limit);
1163#endif
1164
1165 if (!(env->tr.flags & DESC_P_MASK))
1166 cpu_abort(env, "invalid tss");
1167 index = 8 * level + 4;
1168 if ((index + 7) > env->tr.limit)
1169 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1170 return ldq_kernel(env->tr.base + index);
1171}
1172
1173/* 64 bit interrupt */
1174static void do_interrupt64(int intno, int is_int, int error_code,
1175 target_ulong next_eip, int is_hw)
1176{
1177 SegmentCache *dt;
1178 target_ulong ptr;
1179 int type, dpl, selector, cpl, ist;
1180 int has_error_code, new_stack;
1181 uint32_t e1, e2, e3, ss;
1182 target_ulong old_eip, esp, offset;
1183
1184#ifdef VBOX
1185 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1186 cpu_loop_exit();
1187#endif
1188
1189 has_error_code = 0;
1190 if (!is_int && !is_hw) {
1191 switch(intno) {
1192 case 8:
1193 case 10:
1194 case 11:
1195 case 12:
1196 case 13:
1197 case 14:
1198 case 17:
1199 has_error_code = 1;
1200 break;
1201 }
1202 }
1203 if (is_int)
1204 old_eip = next_eip;
1205 else
1206 old_eip = env->eip;
1207
1208 dt = &env->idt;
1209 if (intno * 16 + 15 > dt->limit)
1210 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1211 ptr = dt->base + intno * 16;
1212 e1 = ldl_kernel(ptr);
1213 e2 = ldl_kernel(ptr + 4);
1214 e3 = ldl_kernel(ptr + 8);
1215 /* check gate type */
1216 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1217 switch(type) {
1218 case 14: /* 386 interrupt gate */
1219 case 15: /* 386 trap gate */
1220 break;
1221 default:
1222 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1223 break;
1224 }
1225 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1226 cpl = env->hflags & HF_CPL_MASK;
1227 /* check privilege if software int */
1228 if (is_int && dpl < cpl)
1229 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1230 /* check valid bit */
1231 if (!(e2 & DESC_P_MASK))
1232 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1233 selector = e1 >> 16;
1234 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1235 ist = e2 & 7;
1236 if ((selector & 0xfffc) == 0)
1237 raise_exception_err(EXCP0D_GPF, 0);
1238
1239 if (load_segment(&e1, &e2, selector) != 0)
1240 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1241 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1242 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1243 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1244 if (dpl > cpl)
1245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1246 if (!(e2 & DESC_P_MASK))
1247 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1248 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1249 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1250 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1251 /* to inner privilege */
1252 if (ist != 0)
1253 esp = get_rsp_from_tss(ist + 3);
1254 else
1255 esp = get_rsp_from_tss(dpl);
1256 esp &= ~0xfLL; /* align stack */
1257 ss = 0;
1258 new_stack = 1;
1259 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1260 /* to same privilege */
1261 if (env->eflags & VM_MASK)
1262 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1263 new_stack = 0;
1264 if (ist != 0)
1265 esp = get_rsp_from_tss(ist + 3);
1266 else
1267 esp = ESP;
1268 esp &= ~0xfLL; /* align stack */
1269 dpl = cpl;
1270 } else {
1271 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1272 new_stack = 0; /* avoid warning */
1273 esp = 0; /* avoid warning */
1274 }
1275
1276 PUSHQ(esp, env->segs[R_SS].selector);
1277 PUSHQ(esp, ESP);
1278 PUSHQ(esp, compute_eflags());
1279 PUSHQ(esp, env->segs[R_CS].selector);
1280 PUSHQ(esp, old_eip);
1281 if (has_error_code) {
1282 PUSHQ(esp, error_code);
1283 }
1284
1285 if (new_stack) {
1286 ss = 0 | dpl;
1287 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1288 }
1289 ESP = esp;
1290
1291 selector = (selector & ~3) | dpl;
1292 cpu_x86_load_seg_cache(env, R_CS, selector,
1293 get_seg_base(e1, e2),
1294 get_seg_limit(e1, e2),
1295 e2);
1296 cpu_x86_set_cpl(env, dpl);
1297 env->eip = offset;
1298
1299 /* interrupt gate clear IF mask */
1300 if ((type & 1) == 0) {
1301 env->eflags &= ~IF_MASK;
1302 }
1303
1304#ifndef VBOX
1305 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1306#else
1307 /*
1308 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1309 * gets confused by seemingly changed EFLAGS. See #3491 and
1310 * public bug #2341.
1311 */
1312 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1313#endif
1314}
1315#endif
1316
1317#if defined(CONFIG_USER_ONLY)
1318void helper_syscall(int next_eip_addend)
1319{
1320 env->exception_index = EXCP_SYSCALL;
1321 env->exception_next_eip = env->eip + next_eip_addend;
1322 cpu_loop_exit();
1323}
1324#else
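/* SYSCALL: the target CS/SS selectors come from MSR_STAR[47:32]; in long
   mode the return RIP is saved in RCX and RFLAGS in R11, RFLAGS is masked
   with MSR_FMASK and execution continues at LSTAR (or CSTAR for a
   compatibility-mode caller); in legacy mode EIP is taken from STAR[31:0]. */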
1325void helper_syscall(int next_eip_addend)
1326{
1327 int selector;
1328
1329 if (!(env->efer & MSR_EFER_SCE)) {
1330 raise_exception_err(EXCP06_ILLOP, 0);
1331 }
1332 selector = (env->star >> 32) & 0xffff;
1333#ifdef TARGET_X86_64
1334 if (env->hflags & HF_LMA_MASK) {
1335 int code64;
1336
1337 ECX = env->eip + next_eip_addend;
1338 env->regs[11] = compute_eflags();
1339
1340 code64 = env->hflags & HF_CS64_MASK;
1341
1342 cpu_x86_set_cpl(env, 0);
1343 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1344 0, 0xffffffff,
1345 DESC_G_MASK | DESC_P_MASK |
1346 DESC_S_MASK |
1347 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1348 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1349 0, 0xffffffff,
1350 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1351 DESC_S_MASK |
1352 DESC_W_MASK | DESC_A_MASK);
1353 env->eflags &= ~env->fmask;
1354 load_eflags(env->eflags, 0);
1355 if (code64)
1356 env->eip = env->lstar;
1357 else
1358 env->eip = env->cstar;
1359 } else
1360#endif
1361 {
1362 ECX = (uint32_t)(env->eip + next_eip_addend);
1363
1364 cpu_x86_set_cpl(env, 0);
1365 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1366 0, 0xffffffff,
1367 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1368 DESC_S_MASK |
1369 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1370 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1371 0, 0xffffffff,
1372 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1373 DESC_S_MASK |
1374 DESC_W_MASK | DESC_A_MASK);
1375 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1376 env->eip = (uint32_t)env->star;
1377 }
1378}
1379#endif
1380
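/* SYSRET: reload the user CS/SS from MSR_STAR[63:48]; in long mode the
   return RIP comes from RCX (ECX for a 32-bit return) and RFLAGS is restored
   from R11, while in legacy mode only IF is set again.  CPL becomes 3. */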
1381void helper_sysret(int dflag)
1382{
1383 int cpl, selector;
1384
1385 if (!(env->efer & MSR_EFER_SCE)) {
1386 raise_exception_err(EXCP06_ILLOP, 0);
1387 }
1388 cpl = env->hflags & HF_CPL_MASK;
1389 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1390 raise_exception_err(EXCP0D_GPF, 0);
1391 }
1392 selector = (env->star >> 48) & 0xffff;
1393#ifdef TARGET_X86_64
1394 if (env->hflags & HF_LMA_MASK) {
1395 if (dflag == 2) {
1396 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1397 0, 0xffffffff,
1398 DESC_G_MASK | DESC_P_MASK |
1399 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1400 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1401 DESC_L_MASK);
1402 env->eip = ECX;
1403 } else {
1404 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1405 0, 0xffffffff,
1406 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1407 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1408 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1409 env->eip = (uint32_t)ECX;
1410 }
1411 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1412 0, 0xffffffff,
1413 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1414 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1415 DESC_W_MASK | DESC_A_MASK);
1416 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1417 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1418 cpu_x86_set_cpl(env, 3);
1419 } else
1420#endif
1421 {
1422 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1423 0, 0xffffffff,
1424 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1425 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1426 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1427 env->eip = (uint32_t)ECX;
1428 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1429 0, 0xffffffff,
1430 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1431 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1432 DESC_W_MASK | DESC_A_MASK);
1433 env->eflags |= IF_MASK;
1434 cpu_x86_set_cpl(env, 3);
1435 }
1436#ifdef USE_KQEMU
1437 if (kqemu_is_ok(env)) {
1438 if (env->hflags & HF_LMA_MASK)
1439 CC_OP = CC_OP_EFLAGS;
1440 env->exception_index = -1;
1441 cpu_loop_exit();
1442 }
1443#endif
1444}
1445
1446#ifdef VBOX
1447/**
1448 * Checks and processes external VMM events.
1449 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1450 */
1451void helper_external_event(void)
1452{
1453#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1454 uintptr_t uSP;
1455# ifdef RT_ARCH_AMD64
1456 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1457# else
1458 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1459# endif
1460 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1461#endif
1462 /* Keep in sync with flags checked by gen_check_external_event() */
1463 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1464 {
1465 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1466 ~CPU_INTERRUPT_EXTERNAL_HARD);
1467 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1468 }
1469 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1470 {
1471 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1472 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1473 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1474 }
1475 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1476 {
1477 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1478 ~CPU_INTERRUPT_EXTERNAL_DMA);
1479 remR3DmaRun(env);
1480 }
1481 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1482 {
1483 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1484 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1485 remR3TimersRun(env);
1486 }
1487}
1488/* helper for recording call instruction addresses for later scanning */
1489void helper_record_call()
1490{
1491 if ( !(env->state & CPU_RAW_RING0)
1492 && (env->cr[0] & CR0_PG_MASK)
1493 && !(env->eflags & X86_EFL_IF))
1494 remR3RecordCall(env);
1495}
1496#endif /* VBOX */
1497
1498/* real mode interrupt */
1499static void do_interrupt_real(int intno, int is_int, int error_code,
1500 unsigned int next_eip)
1501{
1502 SegmentCache *dt;
1503 target_ulong ptr, ssp;
1504 int selector;
1505 uint32_t offset, esp;
1506 uint32_t old_cs, old_eip;
1507
1508 /* real mode (simpler !) */
1509 dt = &env->idt;
1510#ifndef VBOX
1511 if (intno * 4 + 3 > dt->limit)
1512#else
1513 if ((unsigned)intno * 4 + 3 > dt->limit)
1514#endif
1515 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1516 ptr = dt->base + intno * 4;
1517 offset = lduw_kernel(ptr);
1518 selector = lduw_kernel(ptr + 2);
1519 esp = ESP;
1520 ssp = env->segs[R_SS].base;
1521 if (is_int)
1522 old_eip = next_eip;
1523 else
1524 old_eip = env->eip;
1525 old_cs = env->segs[R_CS].selector;
1526 /* XXX: use SS segment size ? */
1527 PUSHW(ssp, esp, 0xffff, compute_eflags());
1528 PUSHW(ssp, esp, 0xffff, old_cs);
1529 PUSHW(ssp, esp, 0xffff, old_eip);
1530
1531 /* update processor state */
1532 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1533 env->eip = offset;
1534 env->segs[R_CS].selector = selector;
1535 env->segs[R_CS].base = (selector << 4);
1536 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1537}
1538
1539/* fake user mode interrupt */
1540void do_interrupt_user(int intno, int is_int, int error_code,
1541 target_ulong next_eip)
1542{
1543 SegmentCache *dt;
1544 target_ulong ptr;
1545 int dpl, cpl, shift;
1546 uint32_t e2;
1547
1548 dt = &env->idt;
1549 if (env->hflags & HF_LMA_MASK) {
1550 shift = 4;
1551 } else {
1552 shift = 3;
1553 }
1554 ptr = dt->base + (intno << shift);
1555 e2 = ldl_kernel(ptr + 4);
1556
1557 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1558 cpl = env->hflags & HF_CPL_MASK;
1559 /* check privilege if software int */
1560 if (is_int && dpl < cpl)
1561 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1562
1563 /* Since we emulate only user space, we cannot do more than
1564 exiting the emulation with the suitable exception and error
1565 code */
1566 if (is_int)
1567 EIP = next_eip;
1568}
1569
1570/*
1571 * Begin execution of an interrupt. is_int is TRUE if coming from
1572 * the int instruction. next_eip is the EIP value AFTER the interrupt
1573 * instruction. It is only relevant if is_int is TRUE.
1574 */
1575void do_interrupt(int intno, int is_int, int error_code,
1576 target_ulong next_eip, int is_hw)
1577{
1578 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1579 if (is_int) {
1580 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1581 intno, error_code, env->eip, is_hw ? " hw" : "");
1582 } else {
1583 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1584 intno, error_code, env->eip, next_eip, is_hw ? " hw" : "");
1585 }
1586 }
1587
1588 if (loglevel & CPU_LOG_INT) {
1589 if ((env->cr[0] & CR0_PE_MASK)) {
1590 static int count;
1591 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1592 count, intno, error_code, is_int,
1593 env->hflags & HF_CPL_MASK,
1594 env->segs[R_CS].selector, EIP,
1595 (int)env->segs[R_CS].base + EIP,
1596 env->segs[R_SS].selector, ESP);
1597 if (intno == 0x0e) {
1598 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1599 } else {
1600 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1601 }
1602 fprintf(logfile, "\n");
1603 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1604#if 0
1605 {
1606 int i;
1607 uint8_t *ptr;
1608 fprintf(logfile, " code=");
1609 ptr = env->segs[R_CS].base + env->eip;
1610 for(i = 0; i < 16; i++) {
1611 fprintf(logfile, " %02x", ldub(ptr + i));
1612 }
1613 fprintf(logfile, "\n");
1614 }
1615#endif
1616 count++;
1617 }
1618 }
1619 if (env->cr[0] & CR0_PE_MASK) {
1620#ifdef TARGET_X86_64
1621 if (env->hflags & HF_LMA_MASK) {
1622 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1623 } else
1624#endif
1625 {
1626#ifdef VBOX
1627 /* int xx *, v86 code and VME enabled? */
1628 if ( (env->eflags & VM_MASK)
1629 && (env->cr[4] & CR4_VME_MASK)
1630 && is_int
1631 && !is_hw
1632 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1633 )
1634 do_soft_interrupt_vme(intno, error_code, next_eip);
1635 else
1636#endif /* VBOX */
1637 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1638 }
1639 } else {
1640 do_interrupt_real(intno, is_int, error_code, next_eip);
1641 }
1642}
1643
1644/*
1645 * Check nested exceptions and change to double or triple fault if
1646 * needed. It should only be called if this is not an interrupt.
1647 * Returns the new exception number.
1648 */
1649static int check_exception(int intno, int *error_code)
1650{
1651 int first_contributory = env->old_exception == 0 ||
1652 (env->old_exception >= 10 &&
1653 env->old_exception <= 13);
1654 int second_contributory = intno == 0 ||
1655 (intno >= 10 && intno <= 13);
1656
1657 if (loglevel & CPU_LOG_INT)
1658 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1659 env->old_exception, intno);
1660
1661 if (env->old_exception == EXCP08_DBLE)
1662 cpu_abort(env, "triple fault");
1663
1664 if ((first_contributory && second_contributory)
1665 || (env->old_exception == EXCP0E_PAGE &&
1666 (second_contributory || (intno == EXCP0E_PAGE)))) {
1667 intno = EXCP08_DBLE;
1668 *error_code = 0;
1669 }
1670
1671 if (second_contributory || (intno == EXCP0E_PAGE) ||
1672 (intno == EXCP08_DBLE))
1673 env->old_exception = intno;
1674
1675 return intno;
1676}
1677
1678/*
1679 * Signal an interrupt. It is executed in the main CPU loop.
1680 * is_int is TRUE if coming from the int instruction. next_eip is the
1681 * EIP value AFTER the interrupt instruction. It is only relevant if
1682 * is_int is TRUE.
1683 */
1684void raise_interrupt(int intno, int is_int, int error_code,
1685 int next_eip_addend)
1686{
1687#if defined(VBOX) && defined(DEBUG)
1688 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1689#endif
1690 if (!is_int) {
1691 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1692 intno = check_exception(intno, &error_code);
1693 } else {
1694 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1695 }
1696
1697 env->exception_index = intno;
1698 env->error_code = error_code;
1699 env->exception_is_int = is_int;
1700 env->exception_next_eip = env->eip + next_eip_addend;
1701 cpu_loop_exit();
1702}
1703
1704/* shortcuts to generate exceptions */
1705
1706void (raise_exception_err)(int exception_index, int error_code)
1707{
1708 raise_interrupt(exception_index, 0, error_code, 0);
1709}
1710
1711void raise_exception(int exception_index)
1712{
1713 raise_interrupt(exception_index, 0, 0, 0);
1714}
1715
1716/* SMM support */
1717
1718#if defined(CONFIG_USER_ONLY)
1719
1720void do_smm_enter(void)
1721{
1722}
1723
1724void helper_rsm(void)
1725{
1726}
1727
1728#else
1729
1730#ifdef TARGET_X86_64
1731#define SMM_REVISION_ID 0x00020064
1732#else
1733#define SMM_REVISION_ID 0x00020000
1734#endif
1735
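/* Enter System Management Mode: the current CPU state is written to the
   SMRAM state save area at smbase + 0x8000 (the layout differs between the
   32-bit and 64-bit variants below), then the CPU is reset to a flat
   real-mode-like environment with CS.base = smbase and EIP = 0x8000. */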
1736void do_smm_enter(void)
1737{
1738 target_ulong sm_state;
1739 SegmentCache *dt;
1740 int i, offset;
1741
1742 if (loglevel & CPU_LOG_INT) {
1743 fprintf(logfile, "SMM: enter\n");
1744 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1745 }
1746
1747 env->hflags |= HF_SMM_MASK;
1748 cpu_smm_update(env);
1749
1750 sm_state = env->smbase + 0x8000;
1751
1752#ifdef TARGET_X86_64
1753 for(i = 0; i < 6; i++) {
1754 dt = &env->segs[i];
1755 offset = 0x7e00 + i * 16;
1756 stw_phys(sm_state + offset, dt->selector);
1757 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1758 stl_phys(sm_state + offset + 4, dt->limit);
1759 stq_phys(sm_state + offset + 8, dt->base);
1760 }
1761
1762 stq_phys(sm_state + 0x7e68, env->gdt.base);
1763 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1764
1765 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1766 stq_phys(sm_state + 0x7e78, env->ldt.base);
1767 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1768 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1769
1770 stq_phys(sm_state + 0x7e88, env->idt.base);
1771 stl_phys(sm_state + 0x7e84, env->idt.limit);
1772
1773 stw_phys(sm_state + 0x7e90, env->tr.selector);
1774 stq_phys(sm_state + 0x7e98, env->tr.base);
1775 stl_phys(sm_state + 0x7e94, env->tr.limit);
1776 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1777
1778 stq_phys(sm_state + 0x7ed0, env->efer);
1779
1780 stq_phys(sm_state + 0x7ff8, EAX);
1781 stq_phys(sm_state + 0x7ff0, ECX);
1782 stq_phys(sm_state + 0x7fe8, EDX);
1783 stq_phys(sm_state + 0x7fe0, EBX);
1784 stq_phys(sm_state + 0x7fd8, ESP);
1785 stq_phys(sm_state + 0x7fd0, EBP);
1786 stq_phys(sm_state + 0x7fc8, ESI);
1787 stq_phys(sm_state + 0x7fc0, EDI);
1788 for(i = 8; i < 16; i++)
1789 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1790 stq_phys(sm_state + 0x7f78, env->eip);
1791 stl_phys(sm_state + 0x7f70, compute_eflags());
1792 stl_phys(sm_state + 0x7f68, env->dr[6]);
1793 stl_phys(sm_state + 0x7f60, env->dr[7]);
1794
1795 stl_phys(sm_state + 0x7f48, env->cr[4]);
1796 stl_phys(sm_state + 0x7f50, env->cr[3]);
1797 stl_phys(sm_state + 0x7f58, env->cr[0]);
1798
1799 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1800 stl_phys(sm_state + 0x7f00, env->smbase);
1801#else
1802 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1803 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1804 stl_phys(sm_state + 0x7ff4, compute_eflags());
1805 stl_phys(sm_state + 0x7ff0, env->eip);
1806 stl_phys(sm_state + 0x7fec, EDI);
1807 stl_phys(sm_state + 0x7fe8, ESI);
1808 stl_phys(sm_state + 0x7fe4, EBP);
1809 stl_phys(sm_state + 0x7fe0, ESP);
1810 stl_phys(sm_state + 0x7fdc, EBX);
1811 stl_phys(sm_state + 0x7fd8, EDX);
1812 stl_phys(sm_state + 0x7fd4, ECX);
1813 stl_phys(sm_state + 0x7fd0, EAX);
1814 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1815 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1816
1817 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1818 stl_phys(sm_state + 0x7f64, env->tr.base);
1819 stl_phys(sm_state + 0x7f60, env->tr.limit);
1820 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1821
1822 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1823 stl_phys(sm_state + 0x7f80, env->ldt.base);
1824 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1825 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1826
1827 stl_phys(sm_state + 0x7f74, env->gdt.base);
1828 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1829
1830 stl_phys(sm_state + 0x7f58, env->idt.base);
1831 stl_phys(sm_state + 0x7f54, env->idt.limit);
1832
1833 for(i = 0; i < 6; i++) {
1834 dt = &env->segs[i];
1835 if (i < 3)
1836 offset = 0x7f84 + i * 12;
1837 else
1838 offset = 0x7f2c + (i - 3) * 12;
1839 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1840 stl_phys(sm_state + offset + 8, dt->base);
1841 stl_phys(sm_state + offset + 4, dt->limit);
1842 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1843 }
1844 stl_phys(sm_state + 0x7f14, env->cr[4]);
1845
1846 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1847 stl_phys(sm_state + 0x7ef8, env->smbase);
1848#endif
1849 /* init SMM cpu state */
1850
1851#ifdef TARGET_X86_64
1852 cpu_load_efer(env, 0);
1853#endif
1854 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1855 env->eip = 0x00008000;
1856 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1857 0xffffffff, 0);
1858 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1859 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1860 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1861 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1862 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1863
1864 cpu_x86_update_cr0(env,
1865 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1866 cpu_x86_update_cr4(env, 0);
1867 env->dr[7] = 0x00000400;
1868 CC_OP = CC_OP_EFLAGS;
1869}
1870
1871void helper_rsm(void)
1872{
1873#ifdef VBOX
1874 cpu_abort(env, "helper_rsm");
1875#else /* !VBOX */
1878 target_ulong sm_state;
1879 int i, offset;
1880 uint32_t val;
1881
1882 sm_state = env->smbase + 0x8000;
1883#ifdef TARGET_X86_64
1884 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1885
1886 for(i = 0; i < 6; i++) {
1887 offset = 0x7e00 + i * 16;
1888 cpu_x86_load_seg_cache(env, i,
1889 lduw_phys(sm_state + offset),
1890 ldq_phys(sm_state + offset + 8),
1891 ldl_phys(sm_state + offset + 4),
1892 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1893 }
1894
1895 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1896 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1897
1898 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1899 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1900 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1901 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1902
1903 env->idt.base = ldq_phys(sm_state + 0x7e88);
1904 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1905
1906 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1907 env->tr.base = ldq_phys(sm_state + 0x7e98);
1908 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1909 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1910
1911 EAX = ldq_phys(sm_state + 0x7ff8);
1912 ECX = ldq_phys(sm_state + 0x7ff0);
1913 EDX = ldq_phys(sm_state + 0x7fe8);
1914 EBX = ldq_phys(sm_state + 0x7fe0);
1915 ESP = ldq_phys(sm_state + 0x7fd8);
1916 EBP = ldq_phys(sm_state + 0x7fd0);
1917 ESI = ldq_phys(sm_state + 0x7fc8);
1918 EDI = ldq_phys(sm_state + 0x7fc0);
1919 for(i = 8; i < 16; i++)
1920 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1921 env->eip = ldq_phys(sm_state + 0x7f78);
1922 load_eflags(ldl_phys(sm_state + 0x7f70),
1923 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1924 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1925 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1926
1927 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1928 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1929 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1930
1931 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1932 if (val & 0x20000) {
1933 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1934 }
1935#else
1936 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1937 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1938 load_eflags(ldl_phys(sm_state + 0x7ff4),
1939 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1940 env->eip = ldl_phys(sm_state + 0x7ff0);
1941 EDI = ldl_phys(sm_state + 0x7fec);
1942 ESI = ldl_phys(sm_state + 0x7fe8);
1943 EBP = ldl_phys(sm_state + 0x7fe4);
1944 ESP = ldl_phys(sm_state + 0x7fe0);
1945 EBX = ldl_phys(sm_state + 0x7fdc);
1946 EDX = ldl_phys(sm_state + 0x7fd8);
1947 ECX = ldl_phys(sm_state + 0x7fd4);
1948 EAX = ldl_phys(sm_state + 0x7fd0);
1949 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1950 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1951
1952 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1953 env->tr.base = ldl_phys(sm_state + 0x7f64);
1954 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1955 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1956
1957 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1958 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1959 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1960 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1961
1962 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1963 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1964
1965 env->idt.base = ldl_phys(sm_state + 0x7f58);
1966 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1967
1968 for(i = 0; i < 6; i++) {
1969 if (i < 3)
1970 offset = 0x7f84 + i * 12;
1971 else
1972 offset = 0x7f2c + (i - 3) * 12;
1973 cpu_x86_load_seg_cache(env, i,
1974 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1975 ldl_phys(sm_state + offset + 8),
1976 ldl_phys(sm_state + offset + 4),
1977 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1978 }
1979 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1980
1981 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1982 if (val & 0x20000) {
1983 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1984 }
1985#endif
1986 CC_OP = CC_OP_EFLAGS;
1987 env->hflags &= ~HF_SMM_MASK;
1988 cpu_smm_update(env);
1989
1990 if (loglevel & CPU_LOG_INT) {
1991 fprintf(logfile, "SMM: after RSM\n");
1992 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1993 }
1994#endif /* !VBOX */
1995}
1996
1997#endif /* !CONFIG_USER_ONLY */
1998
1999
2000/* division, flags are undefined */
2001
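/* DIV r/m8: AX is divided by the 8-bit operand; the quotient goes to AL and
   the remainder to AH.  A zero divisor, or a quotient that does not fit in
   8 bits, raises #DE.  For example AX=0x0105 (261) divided by 2 gives
   AL=0x82 (130) and AH=0x01, i.e. the low word of EAX becomes 0x0182. */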
2002void helper_divb_AL(target_ulong t0)
2003{
2004 unsigned int num, den, q, r;
2005
2006 num = (EAX & 0xffff);
2007 den = (t0 & 0xff);
2008 if (den == 0) {
2009 raise_exception(EXCP00_DIVZ);
2010 }
2011 q = (num / den);
2012 if (q > 0xff)
2013 raise_exception(EXCP00_DIVZ);
2014 q &= 0xff;
2015 r = (num % den) & 0xff;
2016 EAX = (EAX & ~0xffff) | (r << 8) | q;
2017}
2018
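/* IDIV r/m8: signed division of AX by the 8-bit operand.  C's '/' and '%'
   truncate towards zero, which matches the x86 behaviour; if the quotient
   does not fit in a signed byte (e.g. AX=-256 divided by 1), #DE is raised. */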
2019void helper_idivb_AL(target_ulong t0)
2020{
2021 int num, den, q, r;
2022
2023 num = (int16_t)EAX;
2024 den = (int8_t)t0;
2025 if (den == 0) {
2026 raise_exception(EXCP00_DIVZ);
2027 }
2028 q = (num / den);
2029 if (q != (int8_t)q)
2030 raise_exception(EXCP00_DIVZ);
2031 q &= 0xff;
2032 r = (num % den) & 0xff;
2033 EAX = (EAX & ~0xffff) | (r << 8) | q;
2034}
2035
2036void helper_divw_AX(target_ulong t0)
2037{
2038 unsigned int num, den, q, r;
2039
2040 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2041 den = (t0 & 0xffff);
2042 if (den == 0) {
2043 raise_exception(EXCP00_DIVZ);
2044 }
2045 q = (num / den);
2046 if (q > 0xffff)
2047 raise_exception(EXCP00_DIVZ);
2048 q &= 0xffff;
2049 r = (num % den) & 0xffff;
2050 EAX = (EAX & ~0xffff) | q;
2051 EDX = (EDX & ~0xffff) | r;
2052}
2053
2054void helper_idivw_AX(target_ulong t0)
2055{
2056 int num, den, q, r;
2057
2058 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2059 den = (int16_t)t0;
2060 if (den == 0) {
2061 raise_exception(EXCP00_DIVZ);
2062 }
2063 q = (num / den);
2064 if (q != (int16_t)q)
2065 raise_exception(EXCP00_DIVZ);
2066 q &= 0xffff;
2067 r = (num % den) & 0xffff;
2068 EAX = (EAX & ~0xffff) | q;
2069 EDX = (EDX & ~0xffff) | r;
2070}
2071
2072void helper_divl_EAX(target_ulong t0)
2073{
2074 unsigned int den, r;
2075 uint64_t num, q;
2076
2077 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2078 den = t0;
2079 if (den == 0) {
2080 raise_exception(EXCP00_DIVZ);
2081 }
2082 q = (num / den);
2083 r = (num % den);
2084 if (q > 0xffffffff)
2085 raise_exception(EXCP00_DIVZ);
2086 EAX = (uint32_t)q;
2087 EDX = (uint32_t)r;
2088}
2089
2090void helper_idivl_EAX(target_ulong t0)
2091{
2092 int den, r;
2093 int64_t num, q;
2094
2095 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2096 den = t0;
2097 if (den == 0) {
2098 raise_exception(EXCP00_DIVZ);
2099 }
2100 q = (num / den);
2101 r = (num % den);
2102 if (q != (int32_t)q)
2103 raise_exception(EXCP00_DIVZ);
2104 EAX = (uint32_t)q;
2105 EDX = (uint32_t)r;
2106}
2107
2108/* bcd */
2109
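/* AAM imm8 splits AL into "BCD" digits: AH = AL / imm8, AL = AL % imm8, with
   imm8 normally 10 (e.g. AL=0x4F/79 gives AH=7, AL=9).  AAD is the inverse:
   AL = AH * imm8 + AL and AH = 0 (the helper clears AH by rewriting the whole
   low word of EAX).  SF/ZF/PF are derived from the resulting AL (CC_DST).
   The XXX note below presumably refers to the missing #DE for AAM with a
   zero immediate. */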
2110/* XXX: exception */
2111void helper_aam(int base)
2112{
2113 int al, ah;
2114 al = EAX & 0xff;
2115 ah = al / base;
2116 al = al % base;
2117 EAX = (EAX & ~0xffff) | al | (ah << 8);
2118 CC_DST = al;
2119}
2120
2121void helper_aad(int base)
2122{
2123 int al, ah;
2124 al = EAX & 0xff;
2125 ah = (EAX >> 8) & 0xff;
2126 al = ((ah * base) + al) & 0xff;
2127 EAX = (EAX & ~0xffff) | al;
2128 CC_DST = al;
2129}
2130
2131void helper_aaa(void)
2132{
2133 int icarry;
2134 int al, ah, af;
2135 int eflags;
2136
2137 eflags = cc_table[CC_OP].compute_all();
2138 af = eflags & CC_A;
2139 al = EAX & 0xff;
2140 ah = (EAX >> 8) & 0xff;
2141
2142 icarry = (al > 0xf9);
2143 if (((al & 0x0f) > 9 ) || af) {
2144 al = (al + 6) & 0x0f;
2145 ah = (ah + 1 + icarry) & 0xff;
2146 eflags |= CC_C | CC_A;
2147 } else {
2148 eflags &= ~(CC_C | CC_A);
2149 al &= 0x0f;
2150 }
2151 EAX = (EAX & ~0xffff) | al | (ah << 8);
2152 CC_SRC = eflags;
2153 FORCE_RET();
2154}
2155
2156void helper_aas(void)
2157{
2158 int icarry;
2159 int al, ah, af;
2160 int eflags;
2161
2162 eflags = cc_table[CC_OP].compute_all();
2163 af = eflags & CC_A;
2164 al = EAX & 0xff;
2165 ah = (EAX >> 8) & 0xff;
2166
2167 icarry = (al < 6);
2168 if (((al & 0x0f) > 9 ) || af) {
2169 al = (al - 6) & 0x0f;
2170 ah = (ah - 1 - icarry) & 0xff;
2171 eflags |= CC_C | CC_A;
2172 } else {
2173 eflags &= ~(CC_C | CC_A);
2174 al &= 0x0f;
2175 }
2176 EAX = (EAX & ~0xffff) | al | (ah << 8);
2177 CC_SRC = eflags;
2178 FORCE_RET();
2179}
2180
2181void helper_daa(void)
2182{
2183 int al, af, cf;
2184 int eflags;
2185
2186 eflags = cc_table[CC_OP].compute_all();
2187 cf = eflags & CC_C;
2188 af = eflags & CC_A;
2189 al = EAX & 0xff;
2190
2191 eflags = 0;
2192 if (((al & 0x0f) > 9 ) || af) {
2193 al = (al + 6) & 0xff;
2194 eflags |= CC_A;
2195 }
2196 if ((al > 0x9f) || cf) {
2197 al = (al + 0x60) & 0xff;
2198 eflags |= CC_C;
2199 }
2200 EAX = (EAX & ~0xff) | al;
2201 /* well, speed is not an issue here, so we compute the flags by hand */
2202 eflags |= (al == 0) << 6; /* zf */
2203 eflags |= parity_table[al]; /* pf */
2204 eflags |= (al & 0x80); /* sf */
2205 CC_SRC = eflags;
2206 FORCE_RET();
2207}
2208
2209void helper_das(void)
2210{
2211 int al, al1, af, cf;
2212 int eflags;
2213
2214 eflags = cc_table[CC_OP].compute_all();
2215 cf = eflags & CC_C;
2216 af = eflags & CC_A;
2217 al = EAX & 0xff;
2218
2219 eflags = 0;
2220 al1 = al;
2221 if (((al & 0x0f) > 9 ) || af) {
2222 eflags |= CC_A;
2223 if (al < 6 || cf)
2224 eflags |= CC_C;
2225 al = (al - 6) & 0xff;
2226 }
2227 if ((al1 > 0x99) || cf) {
2228 al = (al - 0x60) & 0xff;
2229 eflags |= CC_C;
2230 }
2231 EAX = (EAX & ~0xff) | al;
2232 /* well, speed is not an issue here, so we compute the flags by hand */
2233 eflags |= (al == 0) << 6; /* zf */
2234 eflags |= parity_table[al]; /* pf */
2235 eflags |= (al & 0x80); /* sf */
2236 CC_SRC = eflags;
2237 FORCE_RET();
2238}
2239
2240void helper_into(int next_eip_addend)
2241{
2242 int eflags;
2243 eflags = cc_table[CC_OP].compute_all();
2244 if (eflags & CC_O) {
2245 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2246 }
2247}
2248
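/* CMPXCHG8B m64: compare EDX:EAX with the 64-bit memory operand.  On a match
   ZF is set and ECX:EBX is stored; on a mismatch ZF is cleared and the memory
   value is loaded into EDX:EAX.  Note the old value is written back even on a
   mismatch (see the "always do the store" note), so the location is written
   on both paths. */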
2249void helper_cmpxchg8b(target_ulong a0)
2250{
2251 uint64_t d;
2252 int eflags;
2253
2254 eflags = cc_table[CC_OP].compute_all();
2255 d = ldq(a0);
2256 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2257 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2258 eflags |= CC_Z;
2259 } else {
2260 /* always do the store */
2261 stq(a0, d);
2262 EDX = (uint32_t)(d >> 32);
2263 EAX = (uint32_t)d;
2264 eflags &= ~CC_Z;
2265 }
2266 CC_SRC = eflags;
2267}
2268
2269#ifdef TARGET_X86_64
2270void helper_cmpxchg16b(target_ulong a0)
2271{
2272 uint64_t d0, d1;
2273 int eflags;
2274
2275 if ((a0 & 0xf) != 0)
2276 raise_exception(EXCP0D_GPF);
2277 eflags = cc_table[CC_OP].compute_all();
2278 d0 = ldq(a0);
2279 d1 = ldq(a0 + 8);
2280 if (d0 == EAX && d1 == EDX) {
2281 stq(a0, EBX);
2282 stq(a0 + 8, ECX);
2283 eflags |= CC_Z;
2284 } else {
2285 /* always do the store */
2286 stq(a0, d0);
2287 stq(a0 + 8, d1);
2288 EDX = d1;
2289 EAX = d0;
2290 eflags &= ~CC_Z;
2291 }
2292 CC_SRC = eflags;
2293}
2294#endif
2295
2296void helper_single_step(void)
2297{
2298 env->dr[6] |= 0x4000;
2299 raise_exception(EXCP01_SSTP);
2300}
2301
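/* CPUID.  In the VBox build the instruction is forwarded to remR3CpuId() so
   the guest sees exactly the CPUID values configured by the VMM; the QEMU
   leaf table below is only compiled in the non-VBox case. */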
2302void helper_cpuid(void)
2303{
2304#ifndef VBOX
2305 uint32_t index;
2306
2307 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2308
2309 index = (uint32_t)EAX;
2310 /* test if maximum index reached */
2311 if (index & 0x80000000) {
2312 if (index > env->cpuid_xlevel)
2313 index = env->cpuid_level;
2314 } else {
2315 if (index > env->cpuid_level)
2316 index = env->cpuid_level;
2317 }
2318
2319 switch(index) {
2320 case 0:
2321 EAX = env->cpuid_level;
2322 EBX = env->cpuid_vendor1;
2323 EDX = env->cpuid_vendor2;
2324 ECX = env->cpuid_vendor3;
2325 break;
2326 case 1:
2327 EAX = env->cpuid_version;
2328 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2329 ECX = env->cpuid_ext_features;
2330 EDX = env->cpuid_features;
2331 break;
2332 case 2:
2333 /* cache info: needed for Pentium Pro compatibility */
2334 EAX = 1;
2335 EBX = 0;
2336 ECX = 0;
2337 EDX = 0x2c307d;
2338 break;
2339 case 4:
2340 /* cache info: needed for Core compatibility */
2341 switch (ECX) {
2342 case 0: /* L1 dcache info */
2343 EAX = 0x0000121;
2344 EBX = 0x1c0003f;
2345 ECX = 0x000003f;
2346 EDX = 0x0000001;
2347 break;
2348 case 1: /* L1 icache info */
2349 EAX = 0x0000122;
2350 EBX = 0x1c0003f;
2351 ECX = 0x000003f;
2352 EDX = 0x0000001;
2353 break;
2354 case 2: /* L2 cache info */
2355 EAX = 0x0000143;
2356 EBX = 0x3c0003f;
2357 ECX = 0x0000fff;
2358 EDX = 0x0000001;
2359 break;
2360 default: /* end of info */
2361 EAX = 0;
2362 EBX = 0;
2363 ECX = 0;
2364 EDX = 0;
2365 break;
2366 }
2367
2368 break;
2369 case 5:
2370 /* mwait info: needed for Core compatibility */
2371 EAX = 0; /* Smallest monitor-line size in bytes */
2372 EBX = 0; /* Largest monitor-line size in bytes */
2373 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2374 EDX = 0;
2375 break;
2376 case 6:
2377 /* Thermal and Power Leaf */
2378 EAX = 0;
2379 EBX = 0;
2380 ECX = 0;
2381 EDX = 0;
2382 break;
2383 case 9:
2384 /* Direct Cache Access Information Leaf */
2385 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2386 EBX = 0;
2387 ECX = 0;
2388 EDX = 0;
2389 break;
2390 case 0xA:
2391 /* Architectural Performance Monitoring Leaf */
2392 EAX = 0;
2393 EBX = 0;
2394 ECX = 0;
2395 EDX = 0;
2396 break;
2397 case 0x80000000:
2398 EAX = env->cpuid_xlevel;
2399 EBX = env->cpuid_vendor1;
2400 EDX = env->cpuid_vendor2;
2401 ECX = env->cpuid_vendor3;
2402 break;
2403 case 0x80000001:
2404 EAX = env->cpuid_features;
2405 EBX = 0;
2406 ECX = env->cpuid_ext3_features;
2407 EDX = env->cpuid_ext2_features;
2408 break;
2409 case 0x80000002:
2410 case 0x80000003:
2411 case 0x80000004:
2412 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2413 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2414 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2415 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2416 break;
2417 case 0x80000005:
2418 /* cache info (L1 cache) */
2419 EAX = 0x01ff01ff;
2420 EBX = 0x01ff01ff;
2421 ECX = 0x40020140;
2422 EDX = 0x40020140;
2423 break;
2424 case 0x80000006:
2425 /* cache info (L2 cache) */
2426 EAX = 0;
2427 EBX = 0x42004200;
2428 ECX = 0x02008140;
2429 EDX = 0;
2430 break;
2431 case 0x80000008:
2432 /* virtual & phys address size in low 2 bytes. */
2433/* XXX: This value must match the one used in the MMU code. */
2434 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2435 /* 64 bit processor */
2436#if defined(USE_KQEMU)
2437 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2438#else
2439/* XXX: The physical address space is limited to 42 bits in exec.c. */
2440 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2441#endif
2442 } else {
2443#if defined(USE_KQEMU)
2444 EAX = 0x00000020; /* 32 bits physical */
2445#else
2446 if (env->cpuid_features & CPUID_PSE36)
2447 EAX = 0x00000024; /* 36 bits physical */
2448 else
2449 EAX = 0x00000020; /* 32 bits physical */
2450#endif
2451 }
2452 EBX = 0;
2453 ECX = 0;
2454 EDX = 0;
2455 break;
2456 case 0x8000000A:
2457 EAX = 0x00000001;
2458 EBX = 0;
2459 ECX = 0;
2460 EDX = 0;
2461 break;
2462 default:
2463 /* reserved values: zero */
2464 EAX = 0;
2465 EBX = 0;
2466 ECX = 0;
2467 EDX = 0;
2468 break;
2469 }
2470#else /* VBOX */
2471 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2472#endif /* VBOX */
2473}
2474
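/* ENTER with a non-zero nesting level: the loop copies (level - 1) saved
   frame pointers from the caller's frame onto the new stack and then pushes
   t1 (which for ENTER is the new frame-pointer value).  All stack arithmetic
   is masked with the SS stack-size mask so 16-bit stacks wrap correctly. */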
2475void helper_enter_level(int level, int data32, target_ulong t1)
2476{
2477 target_ulong ssp;
2478 uint32_t esp_mask, esp, ebp;
2479
2480 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2481 ssp = env->segs[R_SS].base;
2482 ebp = EBP;
2483 esp = ESP;
2484 if (data32) {
2485 /* 32 bit */
2486 esp -= 4;
2487 while (--level) {
2488 esp -= 4;
2489 ebp -= 4;
2490 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2491 }
2492 esp -= 4;
2493 stl(ssp + (esp & esp_mask), t1);
2494 } else {
2495 /* 16 bit */
2496 esp -= 2;
2497 while (--level) {
2498 esp -= 2;
2499 ebp -= 2;
2500 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2501 }
2502 esp -= 2;
2503 stw(ssp + (esp & esp_mask), t1);
2504 }
2505}
2506
2507#ifdef TARGET_X86_64
2508void helper_enter64_level(int level, int data64, target_ulong t1)
2509{
2510 target_ulong esp, ebp;
2511 ebp = EBP;
2512 esp = ESP;
2513
2514 if (data64) {
2515 /* 64 bit */
2516 esp -= 8;
2517 while (--level) {
2518 esp -= 8;
2519 ebp -= 8;
2520 stq(esp, ldq(ebp));
2521 }
2522 esp -= 8;
2523 stq(esp, t1);
2524 } else {
2525 /* 16 bit */
2526 esp -= 2;
2527 while (--level) {
2528 esp -= 2;
2529 ebp -= 2;
2530 stw(esp, lduw(ebp));
2531 }
2532 esp -= 2;
2533 stw(esp, t1);
2534 }
2535}
2536#endif
2537
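/* LLDT: a null selector is accepted and simply marks the LDT as invalid.
   Otherwise the selector must reference the GDT (TI bit clear) and point at a
   present system descriptor of type 2 (LDT); violations raise #GP or #NP with
   the selector as error code.  In long mode the descriptor is 16 bytes and
   bits 63:32 of the base come from the third dword. */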
2538void helper_lldt(int selector)
2539{
2540 SegmentCache *dt;
2541 uint32_t e1, e2;
2542#ifndef VBOX
2543 int index, entry_limit;
2544#else
2545 unsigned int index, entry_limit;
2546#endif
2547 target_ulong ptr;
2548
2549#ifdef VBOX
2550 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2551 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2552#endif
2553
2554 selector &= 0xffff;
2555 if ((selector & 0xfffc) == 0) {
2556 /* XXX: NULL selector case: invalid LDT */
2557 env->ldt.base = 0;
2558 env->ldt.limit = 0;
2559 } else {
2560 if (selector & 0x4)
2561 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2562 dt = &env->gdt;
2563 index = selector & ~7;
2564#ifdef TARGET_X86_64
2565 if (env->hflags & HF_LMA_MASK)
2566 entry_limit = 15;
2567 else
2568#endif
2569 entry_limit = 7;
2570 if ((index + entry_limit) > dt->limit)
2571 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2572 ptr = dt->base + index;
2573 e1 = ldl_kernel(ptr);
2574 e2 = ldl_kernel(ptr + 4);
2575 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 if (!(e2 & DESC_P_MASK))
2578 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2579#ifdef TARGET_X86_64
2580 if (env->hflags & HF_LMA_MASK) {
2581 uint32_t e3;
2582 e3 = ldl_kernel(ptr + 8);
2583 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2584 env->ldt.base |= (target_ulong)e3 << 32;
2585 } else
2586#endif
2587 {
2588 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2589 }
2590 }
2591 env->ldt.selector = selector;
2592#ifdef VBOX
2593 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2594 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2595#endif
2596}
2597
2598void helper_ltr(int selector)
2599{
2600 SegmentCache *dt;
2601 uint32_t e1, e2;
2602#ifndef VBOX
2603 int index, type, entry_limit;
2604#else
2605 unsigned int index;
2606 int type, entry_limit;
2607#endif
2608 target_ulong ptr;
2609
2610#ifdef VBOX
2611 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2612 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2613 env->tr.flags, (RTSEL)(selector & 0xffff)));
2614#endif
2615 selector &= 0xffff;
2616 if ((selector & 0xfffc) == 0) {
2617 /* NULL selector case: invalid TR */
2618 env->tr.base = 0;
2619 env->tr.limit = 0;
2620 env->tr.flags = 0;
2621 } else {
2622 if (selector & 0x4)
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 dt = &env->gdt;
2625 index = selector & ~7;
2626#ifdef TARGET_X86_64
2627 if (env->hflags & HF_LMA_MASK)
2628 entry_limit = 15;
2629 else
2630#endif
2631 entry_limit = 7;
2632 if ((index + entry_limit) > dt->limit)
2633 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2634 ptr = dt->base + index;
2635 e1 = ldl_kernel(ptr);
2636 e2 = ldl_kernel(ptr + 4);
2637 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2638 if ((e2 & DESC_S_MASK) ||
2639 (type != 1 && type != 9))
2640 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2641 if (!(e2 & DESC_P_MASK))
2642 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2643#ifdef TARGET_X86_64
2644 if (env->hflags & HF_LMA_MASK) {
2645 uint32_t e3, e4;
2646 e3 = ldl_kernel(ptr + 8);
2647 e4 = ldl_kernel(ptr + 12);
2648 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2649 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2650 load_seg_cache_raw_dt(&env->tr, e1, e2);
2651 env->tr.base |= (target_ulong)e3 << 32;
2652 } else
2653#endif
2654 {
2655 load_seg_cache_raw_dt(&env->tr, e1, e2);
2656 }
2657 e2 |= DESC_TSS_BUSY_MASK;
2658 stl_kernel(ptr + 4, e2);
2659 }
2660 env->tr.selector = selector;
2661#ifdef VBOX
2662 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2663 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2664 env->tr.flags, (RTSEL)(selector & 0xffff)));
2665#endif
2666}
2667
2668/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2669void helper_load_seg(int seg_reg, int selector)
2670{
2671 uint32_t e1, e2;
2672 int cpl, dpl, rpl;
2673 SegmentCache *dt;
2674#ifndef VBOX
2675 int index;
2676#else
2677 unsigned int index;
2678#endif
2679 target_ulong ptr;
2680
2681 selector &= 0xffff;
2682 cpl = env->hflags & HF_CPL_MASK;
2683
2684#ifdef VBOX
2685 /* Trying to load a selector with CPL=1? */
2686 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2687 {
2688 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2689 selector = selector & 0xfffc;
2690 }
2691#endif
2692 if ((selector & 0xfffc) == 0) {
2693 /* null selector case */
2694 if (seg_reg == R_SS
2695#ifdef TARGET_X86_64
2696 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2697#endif
2698 )
2699 raise_exception_err(EXCP0D_GPF, 0);
2700 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2701 } else {
2702
2703 if (selector & 0x4)
2704 dt = &env->ldt;
2705 else
2706 dt = &env->gdt;
2707 index = selector & ~7;
2708 if ((index + 7) > dt->limit)
2709 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2710 ptr = dt->base + index;
2711 e1 = ldl_kernel(ptr);
2712 e2 = ldl_kernel(ptr + 4);
2713
2714 if (!(e2 & DESC_S_MASK))
2715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2716 rpl = selector & 3;
2717 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2718 if (seg_reg == R_SS) {
2719 /* must be writable segment */
2720 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2721 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2722 if (rpl != cpl || dpl != cpl)
2723 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2724 } else {
2725 /* must be readable segment */
2726 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2727 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2728
2729 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2730 /* if not conforming code, test rights */
2731 if (dpl < cpl || dpl < rpl)
2732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2733 }
2734 }
2735
2736 if (!(e2 & DESC_P_MASK)) {
2737 if (seg_reg == R_SS)
2738 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2739 else
2740 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2741 }
2742
2743 /* set the access bit if not already set */
2744 if (!(e2 & DESC_A_MASK)) {
2745 e2 |= DESC_A_MASK;
2746 stl_kernel(ptr + 4, e2);
2747 }
2748
2749 cpu_x86_load_seg_cache(env, seg_reg, selector,
2750 get_seg_base(e1, e2),
2751 get_seg_limit(e1, e2),
2752 e2);
2753#if 0
2754 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2755 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2756#endif
2757 }
2758}
2759
2760/* protected mode jump */
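/* A direct jump to a code segment checks the conforming / non-conforming
   privilege rules and keeps CPL unchanged; a jump through a call gate
   re-reads the target code segment from the gate, and a jump to a TSS or
   task gate performs a task switch via switch_tss(). */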
2761void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2762 int next_eip_addend)
2763{
2764 int gate_cs, type;
2765 uint32_t e1, e2, cpl, dpl, rpl, limit;
2766 target_ulong next_eip;
2767
2768#ifdef VBOX
2769 e1 = e2 = 0;
2770#endif
2771 if ((new_cs & 0xfffc) == 0)
2772 raise_exception_err(EXCP0D_GPF, 0);
2773 if (load_segment(&e1, &e2, new_cs) != 0)
2774 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2775 cpl = env->hflags & HF_CPL_MASK;
2776 if (e2 & DESC_S_MASK) {
2777 if (!(e2 & DESC_CS_MASK))
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2780 if (e2 & DESC_C_MASK) {
2781 /* conforming code segment */
2782 if (dpl > cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 } else {
2785 /* non conforming code segment */
2786 rpl = new_cs & 3;
2787 if (rpl > cpl)
2788 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2789 if (dpl != cpl)
2790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 }
2792 if (!(e2 & DESC_P_MASK))
2793 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2794 limit = get_seg_limit(e1, e2);
2795 if (new_eip > limit &&
2796 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2797 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2798 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2799 get_seg_base(e1, e2), limit, e2);
2800 EIP = new_eip;
2801 } else {
2802 /* jump to call or task gate */
2803 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2804 rpl = new_cs & 3;
2805 cpl = env->hflags & HF_CPL_MASK;
2806 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2807 switch(type) {
2808 case 1: /* 286 TSS */
2809 case 9: /* 386 TSS */
2810 case 5: /* task gate */
2811 if (dpl < cpl || dpl < rpl)
2812 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2813 next_eip = env->eip + next_eip_addend;
2814 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2815 CC_OP = CC_OP_EFLAGS;
2816 break;
2817 case 4: /* 286 call gate */
2818 case 12: /* 386 call gate */
2819 if ((dpl < cpl) || (dpl < rpl))
2820 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2821 if (!(e2 & DESC_P_MASK))
2822 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2823 gate_cs = e1 >> 16;
2824 new_eip = (e1 & 0xffff);
2825 if (type == 12)
2826 new_eip |= (e2 & 0xffff0000);
2827 if (load_segment(&e1, &e2, gate_cs) != 0)
2828 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2829 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2830 /* must be code segment */
2831 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2832 (DESC_S_MASK | DESC_CS_MASK)))
2833 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2834 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2835 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2836 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2837 if (!(e2 & DESC_P_MASK))
2838#ifdef VBOX /* See page 3-514 of 253666.pdf */
2839 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2840#else
2841 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2842#endif
2843 limit = get_seg_limit(e1, e2);
2844 if (new_eip > limit)
2845 raise_exception_err(EXCP0D_GPF, 0);
2846 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2847 get_seg_base(e1, e2), limit, e2);
2848 EIP = new_eip;
2849 break;
2850 default:
2851 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2852 break;
2853 }
2854 }
2855}
2856
2857/* real mode call */
2858void helper_lcall_real(int new_cs, target_ulong new_eip1,
2859 int shift, int next_eip)
2860{
2861 int new_eip;
2862 uint32_t esp, esp_mask;
2863 target_ulong ssp;
2864
2865 new_eip = new_eip1;
2866 esp = ESP;
2867 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2868 ssp = env->segs[R_SS].base;
2869 if (shift) {
2870 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2871 PUSHL(ssp, esp, esp_mask, next_eip);
2872 } else {
2873 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2874 PUSHW(ssp, esp, esp_mask, next_eip);
2875 }
2876
2877 SET_ESP(esp, esp_mask);
2878 env->eip = new_eip;
2879 env->segs[R_CS].selector = new_cs;
2880 env->segs[R_CS].base = (new_cs << 4);
2881}
2882
2883/* protected mode call */
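/* The interesting case is a call gate to a more privileged non-conforming
   code segment: SS:ESP for the target privilege level are fetched from the
   TSS, the old SS:ESP are pushed on the new stack, param_count parameters are
   copied across from the old stack, and finally the return CS:EIP is pushed.
   'shift' selects the operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit for
   the direct far call path). */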
2884void helper_lcall_protected(int new_cs, target_ulong new_eip,
2885 int shift, int next_eip_addend)
2886{
2887 int new_stack, i;
2888 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2889 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2890 uint32_t val, limit, old_sp_mask;
2891 target_ulong ssp, old_ssp, next_eip;
2892
2893#ifdef VBOX
2894 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2895#endif
2896 next_eip = env->eip + next_eip_addend;
2897#ifdef DEBUG_PCALL
2898 if (loglevel & CPU_LOG_PCALL) {
2899 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2900 new_cs, (uint32_t)new_eip, shift);
2901 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2902 }
2903#endif
2904 if ((new_cs & 0xfffc) == 0)
2905 raise_exception_err(EXCP0D_GPF, 0);
2906 if (load_segment(&e1, &e2, new_cs) != 0)
2907 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2908 cpl = env->hflags & HF_CPL_MASK;
2909#ifdef DEBUG_PCALL
2910 if (loglevel & CPU_LOG_PCALL) {
2911 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2912 }
2913#endif
2914 if (e2 & DESC_S_MASK) {
2915 if (!(e2 & DESC_CS_MASK))
2916 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2917 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2918 if (e2 & DESC_C_MASK) {
2919 /* conforming code segment */
2920 if (dpl > cpl)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 } else {
2923 /* non conforming code segment */
2924 rpl = new_cs & 3;
2925 if (rpl > cpl)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 if (dpl != cpl)
2928 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2929 }
2930 if (!(e2 & DESC_P_MASK))
2931 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2932
2933#ifdef TARGET_X86_64
2934 /* XXX: check 16/32 bit cases in long mode */
2935 if (shift == 2) {
2936 target_ulong rsp;
2937 /* 64 bit case */
2938 rsp = ESP;
2939 PUSHQ(rsp, env->segs[R_CS].selector);
2940 PUSHQ(rsp, next_eip);
2941 /* from this point, not restartable */
2942 ESP = rsp;
2943 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2944 get_seg_base(e1, e2),
2945 get_seg_limit(e1, e2), e2);
2946 EIP = new_eip;
2947 } else
2948#endif
2949 {
2950 sp = ESP;
2951 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2952 ssp = env->segs[R_SS].base;
2953 if (shift) {
2954 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2955 PUSHL(ssp, sp, sp_mask, next_eip);
2956 } else {
2957 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2958 PUSHW(ssp, sp, sp_mask, next_eip);
2959 }
2960
2961 limit = get_seg_limit(e1, e2);
2962 if (new_eip > limit)
2963 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2964 /* from this point, not restartable */
2965 SET_ESP(sp, sp_mask);
2966 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2967 get_seg_base(e1, e2), limit, e2);
2968 EIP = new_eip;
2969 }
2970 } else {
2971 /* check gate type */
2972 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2973 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2974 rpl = new_cs & 3;
2975 switch(type) {
2976 case 1: /* available 286 TSS */
2977 case 9: /* available 386 TSS */
2978 case 5: /* task gate */
2979 if (dpl < cpl || dpl < rpl)
2980 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2981 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2982 CC_OP = CC_OP_EFLAGS;
2983 return;
2984 case 4: /* 286 call gate */
2985 case 12: /* 386 call gate */
2986 break;
2987 default:
2988 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2989 break;
2990 }
2991 shift = type >> 3;
2992
2993 if (dpl < cpl || dpl < rpl)
2994 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2995 /* check valid bit */
2996 if (!(e2 & DESC_P_MASK))
2997 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2998 selector = e1 >> 16;
2999 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
3000 param_count = e2 & 0x1f;
3001 if ((selector & 0xfffc) == 0)
3002 raise_exception_err(EXCP0D_GPF, 0);
3003
3004 if (load_segment(&e1, &e2, selector) != 0)
3005 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3006 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3007 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3008 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3009 if (dpl > cpl)
3010 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3011 if (!(e2 & DESC_P_MASK))
3012 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3013
3014 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3015 /* to inner privilege */
3016 get_ss_esp_from_tss(&ss, &sp, dpl);
3017#ifdef DEBUG_PCALL
3018 if (loglevel & CPU_LOG_PCALL)
3019 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3020 ss, sp, param_count, ESP);
3021#endif
3022 if ((ss & 0xfffc) == 0)
3023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3024 if ((ss & 3) != dpl)
3025 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3026 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3027 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3028 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3029 if (ss_dpl != dpl)
3030 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3031 if (!(ss_e2 & DESC_S_MASK) ||
3032 (ss_e2 & DESC_CS_MASK) ||
3033 !(ss_e2 & DESC_W_MASK))
3034 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3035 if (!(ss_e2 & DESC_P_MASK))
3036#ifdef VBOX /* See page 3-99 of 253666.pdf */
3037 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3038#else
3039 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3040#endif
3041
3042 // push_size = ((param_count * 2) + 8) << shift;
3043
3044 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3045 old_ssp = env->segs[R_SS].base;
3046
3047 sp_mask = get_sp_mask(ss_e2);
3048 ssp = get_seg_base(ss_e1, ss_e2);
3049 if (shift) {
3050 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3051 PUSHL(ssp, sp, sp_mask, ESP);
3052 for(i = param_count - 1; i >= 0; i--) {
3053 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3054 PUSHL(ssp, sp, sp_mask, val);
3055 }
3056 } else {
3057 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3058 PUSHW(ssp, sp, sp_mask, ESP);
3059 for(i = param_count - 1; i >= 0; i--) {
3060 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3061 PUSHW(ssp, sp, sp_mask, val);
3062 }
3063 }
3064 new_stack = 1;
3065 } else {
3066 /* to same privilege */
3067 sp = ESP;
3068 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3069 ssp = env->segs[R_SS].base;
3070 // push_size = (4 << shift);
3071 new_stack = 0;
3072 }
3073
3074 if (shift) {
3075 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3076 PUSHL(ssp, sp, sp_mask, next_eip);
3077 } else {
3078 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3079 PUSHW(ssp, sp, sp_mask, next_eip);
3080 }
3081
3082 /* from this point, not restartable */
3083
3084 if (new_stack) {
3085 ss = (ss & ~3) | dpl;
3086 cpu_x86_load_seg_cache(env, R_SS, ss,
3087 ssp,
3088 get_seg_limit(ss_e1, ss_e2),
3089 ss_e2);
3090 }
3091
3092 selector = (selector & ~3) | dpl;
3093 cpu_x86_load_seg_cache(env, R_CS, selector,
3094 get_seg_base(e1, e2),
3095 get_seg_limit(e1, e2),
3096 e2);
3097 cpu_x86_set_cpl(env, dpl);
3098 SET_ESP(sp, sp_mask);
3099 EIP = offset;
3100 }
3101#ifdef USE_KQEMU
3102 if (kqemu_is_ok(env)) {
3103 env->exception_index = -1;
3104 cpu_loop_exit();
3105 }
3106#endif
3107}
3108
3109/* real and vm86 mode iret */
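/* Pops EIP, CS and EFLAGS (16 or 32 bit wide depending on 'shift').  The
   VBox-specific block implements the CR4.VME extension for VM86 with
   IOPL < 3: popping a value that would set TF, or that sets IF while VIP is
   pending, raises #GP, and the popped IF bit is transferred to VIF instead
   of the real IF. */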
3110void helper_iret_real(int shift)
3111{
3112 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3113 target_ulong ssp;
3114 int eflags_mask;
3115#ifdef VBOX
3116 bool fVME = false;
3117
3118 remR3TrapClear(env->pVM);
3119#endif /* VBOX */
3120
3121 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3122 sp = ESP;
3123 ssp = env->segs[R_SS].base;
3124 if (shift == 1) {
3125 /* 32 bits */
3126 POPL(ssp, sp, sp_mask, new_eip);
3127 POPL(ssp, sp, sp_mask, new_cs);
3128 new_cs &= 0xffff;
3129 POPL(ssp, sp, sp_mask, new_eflags);
3130 } else {
3131 /* 16 bits */
3132 POPW(ssp, sp, sp_mask, new_eip);
3133 POPW(ssp, sp, sp_mask, new_cs);
3134 POPW(ssp, sp, sp_mask, new_eflags);
3135 }
3136#ifdef VBOX
3137 if ( (env->eflags & VM_MASK)
3138 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3139 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3140 {
3141 fVME = true;
3142 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3143 /* if TF will be set -> #GP */
3144 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3145 || (new_eflags & TF_MASK))
3146 raise_exception(EXCP0D_GPF);
3147 }
3148#endif /* VBOX */
3149 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3150 env->segs[R_CS].selector = new_cs;
3151 env->segs[R_CS].base = (new_cs << 4);
3152 env->eip = new_eip;
3153#ifdef VBOX
3154 if (fVME)
3155 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3156 else
3157#endif
3158 if (env->eflags & VM_MASK)
3159 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3160 else
3161 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3162 if (shift == 0)
3163 eflags_mask &= 0xffff;
3164 load_eflags(new_eflags, eflags_mask);
3165 env->hflags2 &= ~HF2_NMI_MASK;
3166#ifdef VBOX
3167 if (fVME)
3168 {
3169 if (new_eflags & IF_MASK)
3170 env->eflags |= VIF_MASK;
3171 else
3172 env->eflags &= ~VIF_MASK;
3173 }
3174#endif /* VBOX */
3175}
3176
3177#ifndef VBOX
3178static inline void validate_seg(int seg_reg, int cpl)
3179#else /* VBOX */
3180DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3181#endif /* VBOX */
3182{
3183 int dpl;
3184 uint32_t e2;
3185
3186 /* XXX: on x86_64, we do not want to nullify FS and GS because
3187 they may still contain a valid base. I would be interested to
3188 know how a real x86_64 CPU behaves */
3189 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3190 (env->segs[seg_reg].selector & 0xfffc) == 0)
3191 return;
3192
3193 e2 = env->segs[seg_reg].flags;
3194 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3195 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3196 /* data or non conforming code segment */
3197 if (dpl < cpl) {
3198 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3199 }
3200 }
3201}
3202
3203/* protected mode iret */
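/* Common worker for protected mode LRET and IRET (is_iret).  After popping
   CS:EIP (and EFLAGS for IRET), the RPL of the new CS decides between a
   same-privilege return and a return to an outer ring; in the latter case
   SS:ESP are popped as well and the data segment registers are validated so
   that no segment only usable at the old, more privileged level survives. */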
3204#ifndef VBOX
3205static inline void helper_ret_protected(int shift, int is_iret, int addend)
3206#else /* VBOX */
3207DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3208#endif /* VBOX */
3209{
3210 uint32_t new_cs, new_eflags, new_ss;
3211 uint32_t new_es, new_ds, new_fs, new_gs;
3212 uint32_t e1, e2, ss_e1, ss_e2;
3213 int cpl, dpl, rpl, eflags_mask, iopl;
3214 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3215
3216#ifdef VBOX
3217 ss_e1 = ss_e2 = e1 = e2 = 0;
3218#endif
3219
3220#ifdef TARGET_X86_64
3221 if (shift == 2)
3222 sp_mask = -1;
3223 else
3224#endif
3225 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3226 sp = ESP;
3227 ssp = env->segs[R_SS].base;
3228 new_eflags = 0; /* avoid warning */
3229#ifdef TARGET_X86_64
3230 if (shift == 2) {
3231 POPQ(sp, new_eip);
3232 POPQ(sp, new_cs);
3233 new_cs &= 0xffff;
3234 if (is_iret) {
3235 POPQ(sp, new_eflags);
3236 }
3237 } else
3238#endif
3239 if (shift == 1) {
3240 /* 32 bits */
3241 POPL(ssp, sp, sp_mask, new_eip);
3242 POPL(ssp, sp, sp_mask, new_cs);
3243 new_cs &= 0xffff;
3244 if (is_iret) {
3245 POPL(ssp, sp, sp_mask, new_eflags);
3246#if defined(VBOX) && defined(DEBUG)
3247 printf("iret: new CS %04X\n", new_cs);
3248 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3249 printf("iret: new EFLAGS %08X\n", new_eflags);
3250 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3251#endif
3252 if (new_eflags & VM_MASK)
3253 goto return_to_vm86;
3254 }
3255#ifdef VBOX
3256 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3257 {
3258#ifdef DEBUG
3259 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3260#endif
3261 new_cs = new_cs & 0xfffc;
3262 }
3263#endif
3264 } else {
3265 /* 16 bits */
3266 POPW(ssp, sp, sp_mask, new_eip);
3267 POPW(ssp, sp, sp_mask, new_cs);
3268 if (is_iret)
3269 POPW(ssp, sp, sp_mask, new_eflags);
3270 }
3271#ifdef DEBUG_PCALL
3272 if (loglevel & CPU_LOG_PCALL) {
3273 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3274 new_cs, new_eip, shift, addend);
3275 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3276 }
3277#endif
3278 if ((new_cs & 0xfffc) == 0)
3279 {
3280#if defined(VBOX) && defined(DEBUG)
3281 printf("(new_cs & 0xfffc) == 0\n");
3282#endif
3283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3284 }
3285 if (load_segment(&e1, &e2, new_cs) != 0)
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 printf("load_segment failed\n");
3289#endif
3290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3291 }
3292 if (!(e2 & DESC_S_MASK) ||
3293 !(e2 & DESC_CS_MASK))
3294 {
3295#if defined(VBOX) && defined(DEBUG)
3296 printf("e2 mask %08x\n", e2);
3297#endif
3298 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3299 }
3300 cpl = env->hflags & HF_CPL_MASK;
3301 rpl = new_cs & 3;
3302 if (rpl < cpl)
3303 {
3304#if defined(VBOX) && defined(DEBUG)
3305 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3306#endif
3307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3308 }
3309 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3310 if (e2 & DESC_C_MASK) {
3311 if (dpl > rpl)
3312 {
3313#if defined(VBOX) && defined(DEBUG)
3314 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3315#endif
3316 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3317 }
3318 } else {
3319 if (dpl != rpl)
3320 {
3321#if defined(VBOX) && defined(DEBUG)
3322 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3323#endif
3324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3325 }
3326 }
3327 if (!(e2 & DESC_P_MASK))
3328 {
3329#if defined(VBOX) && defined(DEBUG)
3330 printf("DESC_P_MASK e2=%08x\n", e2);
3331#endif
3332 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3333 }
3334
3335 sp += addend;
3336 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3337 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3338 /* return to same privilege level */
3339 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3340 get_seg_base(e1, e2),
3341 get_seg_limit(e1, e2),
3342 e2);
3343 } else {
3344 /* return to different privilege level */
3345#ifdef TARGET_X86_64
3346 if (shift == 2) {
3347 POPQ(sp, new_esp);
3348 POPQ(sp, new_ss);
3349 new_ss &= 0xffff;
3350 } else
3351#endif
3352 if (shift == 1) {
3353 /* 32 bits */
3354 POPL(ssp, sp, sp_mask, new_esp);
3355 POPL(ssp, sp, sp_mask, new_ss);
3356 new_ss &= 0xffff;
3357 } else {
3358 /* 16 bits */
3359 POPW(ssp, sp, sp_mask, new_esp);
3360 POPW(ssp, sp, sp_mask, new_ss);
3361 }
3362#ifdef DEBUG_PCALL
3363 if (loglevel & CPU_LOG_PCALL) {
3364 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3365 new_ss, new_esp);
3366 }
3367#endif
3368 if ((new_ss & 0xfffc) == 0) {
3369#ifdef TARGET_X86_64
3370 /* NULL ss is allowed in long mode if cpl != 3 */
3371 /* XXX: test CS64 ? */
3372 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3373 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3374 0, 0xffffffff,
3375 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3376 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3377 DESC_W_MASK | DESC_A_MASK);
3378 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3379 } else
3380#endif
3381 {
3382 raise_exception_err(EXCP0D_GPF, 0);
3383 }
3384 } else {
3385 if ((new_ss & 3) != rpl)
3386 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3387 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3388 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3389 if (!(ss_e2 & DESC_S_MASK) ||
3390 (ss_e2 & DESC_CS_MASK) ||
3391 !(ss_e2 & DESC_W_MASK))
3392 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3393 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3394 if (dpl != rpl)
3395 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3396 if (!(ss_e2 & DESC_P_MASK))
3397 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3398 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3399 get_seg_base(ss_e1, ss_e2),
3400 get_seg_limit(ss_e1, ss_e2),
3401 ss_e2);
3402 }
3403
3404 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3405 get_seg_base(e1, e2),
3406 get_seg_limit(e1, e2),
3407 e2);
3408 cpu_x86_set_cpl(env, rpl);
3409 sp = new_esp;
3410#ifdef TARGET_X86_64
3411 if (env->hflags & HF_CS64_MASK)
3412 sp_mask = -1;
3413 else
3414#endif
3415 sp_mask = get_sp_mask(ss_e2);
3416
3417 /* validate data segments */
3418 validate_seg(R_ES, rpl);
3419 validate_seg(R_DS, rpl);
3420 validate_seg(R_FS, rpl);
3421 validate_seg(R_GS, rpl);
3422
3423 sp += addend;
3424 }
3425 SET_ESP(sp, sp_mask);
3426 env->eip = new_eip;
3427 if (is_iret) {
3428 /* NOTE: 'cpl' is the _old_ CPL */
3429 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3430 if (cpl == 0)
3431#ifdef VBOX
3432 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3433#else
3434 eflags_mask |= IOPL_MASK;
3435#endif
3436 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3437 if (cpl <= iopl)
3438 eflags_mask |= IF_MASK;
3439 if (shift == 0)
3440 eflags_mask &= 0xffff;
3441 load_eflags(new_eflags, eflags_mask);
3442 }
3443 return;
3444
3445 return_to_vm86:
3446 POPL(ssp, sp, sp_mask, new_esp);
3447 POPL(ssp, sp, sp_mask, new_ss);
3448 POPL(ssp, sp, sp_mask, new_es);
3449 POPL(ssp, sp, sp_mask, new_ds);
3450 POPL(ssp, sp, sp_mask, new_fs);
3451 POPL(ssp, sp, sp_mask, new_gs);
3452
3453 /* modify processor state */
3454 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3455 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3456 load_seg_vm(R_CS, new_cs & 0xffff);
3457 cpu_x86_set_cpl(env, 3);
3458 load_seg_vm(R_SS, new_ss & 0xffff);
3459 load_seg_vm(R_ES, new_es & 0xffff);
3460 load_seg_vm(R_DS, new_ds & 0xffff);
3461 load_seg_vm(R_FS, new_fs & 0xffff);
3462 load_seg_vm(R_GS, new_gs & 0xffff);
3463
3464 env->eip = new_eip & 0xffff;
3465 ESP = new_esp;
3466}
3467
3468void helper_iret_protected(int shift, int next_eip)
3469{
3470 int tss_selector, type;
3471 uint32_t e1, e2;
3472
3473#ifdef VBOX
3474 e1 = e2 = 0;
3475 remR3TrapClear(env->pVM);
3476#endif
3477
3478 /* specific case for TSS */
3479 if (env->eflags & NT_MASK) {
3480#ifdef TARGET_X86_64
3481 if (env->hflags & HF_LMA_MASK)
3482 raise_exception_err(EXCP0D_GPF, 0);
3483#endif
3484 tss_selector = lduw_kernel(env->tr.base + 0);
3485 if (tss_selector & 4)
3486 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3487 if (load_segment(&e1, &e2, tss_selector) != 0)
3488 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3489 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3490 /* NOTE: we check both segment and busy TSS */
3491 if (type != 3)
3492 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3493 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3494 } else {
3495 helper_ret_protected(shift, 1, 0);
3496 }
3497 env->hflags2 &= ~HF2_NMI_MASK;
3498#ifdef USE_KQEMU
3499 if (kqemu_is_ok(env)) {
3500 CC_OP = CC_OP_EFLAGS;
3501 env->exception_index = -1;
3502 cpu_loop_exit();
3503 }
3504#endif
3505}
3506
3507void helper_lret_protected(int shift, int addend)
3508{
3509 helper_ret_protected(shift, 0, addend);
3510#ifdef USE_KQEMU
3511 if (kqemu_is_ok(env)) {
3512 env->exception_index = -1;
3513 cpu_loop_exit();
3514 }
3515#endif
3516}
3517
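/* SYSENTER: CPL is forced to 0 and flat CS/SS are derived from
   MSR_IA32_SYSENTER_CS (CS = MSR, SS = MSR + 8), while ESP/EIP come from the
   SYSENTER_ESP/EIP MSRs.  SYSEXIT returns to CPL 3 with CS/SS at MSR + 16/24
   (or + 32/40 for a 64-bit return) and ESP/EIP taken from ECX/EDX. */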
3518void helper_sysenter(void)
3519{
3520 if (env->sysenter_cs == 0) {
3521 raise_exception_err(EXCP0D_GPF, 0);
3522 }
3523 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3524 cpu_x86_set_cpl(env, 0);
3525
3526#ifdef TARGET_X86_64
3527 if (env->hflags & HF_LMA_MASK) {
3528 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3529 0, 0xffffffff,
3530 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3531 DESC_S_MASK |
3532 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3533 } else
3534#endif
3535 {
3536 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3537 0, 0xffffffff,
3538 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3539 DESC_S_MASK |
3540 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3541 }
3542 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3543 0, 0xffffffff,
3544 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3545 DESC_S_MASK |
3546 DESC_W_MASK | DESC_A_MASK);
3547 ESP = env->sysenter_esp;
3548 EIP = env->sysenter_eip;
3549}
3550
3551void helper_sysexit(int dflag)
3552{
3553 int cpl;
3554
3555 cpl = env->hflags & HF_CPL_MASK;
3556 if (env->sysenter_cs == 0 || cpl != 0) {
3557 raise_exception_err(EXCP0D_GPF, 0);
3558 }
3559 cpu_x86_set_cpl(env, 3);
3560#ifdef TARGET_X86_64
3561 if (dflag == 2) {
3562 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3563 0, 0xffffffff,
3564 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3565 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3566 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3567 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3568 0, 0xffffffff,
3569 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3570 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3571 DESC_W_MASK | DESC_A_MASK);
3572 } else
3573#endif
3574 {
3575 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3576 0, 0xffffffff,
3577 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3578 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3579 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3580 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3581 0, 0xffffffff,
3582 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3583 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3584 DESC_W_MASK | DESC_A_MASK);
3585 }
3586 ESP = ECX;
3587 EIP = EDX;
3588#ifdef USE_KQEMU
3589 if (kqemu_is_ok(env)) {
3590 env->exception_index = -1;
3591 cpu_loop_exit();
3592 }
3593#endif
3594}
3595
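/* Control register access.  CR8 is special: it normally mirrors the local
   APIC task priority register, but when a virtual TPR is active
   (HF2_VINTR_MASK, e.g. under SVM interrupt masking) the shadow value v_tpr
   is used instead. */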
3596#if defined(CONFIG_USER_ONLY)
3597target_ulong helper_read_crN(int reg)
3598{
3599 return 0;
3600}
3601
3602void helper_write_crN(int reg, target_ulong t0)
3603{
3604}
3605#else
3606target_ulong helper_read_crN(int reg)
3607{
3608 target_ulong val;
3609
3610 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3611 switch(reg) {
3612 default:
3613 val = env->cr[reg];
3614 break;
3615 case 8:
3616 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3617 val = cpu_get_apic_tpr(env);
3618 } else {
3619 val = env->v_tpr;
3620 }
3621 break;
3622 }
3623 return val;
3624}
3625
3626void helper_write_crN(int reg, target_ulong t0)
3627{
3628 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3629 switch(reg) {
3630 case 0:
3631 cpu_x86_update_cr0(env, t0);
3632 break;
3633 case 3:
3634 cpu_x86_update_cr3(env, t0);
3635 break;
3636 case 4:
3637 cpu_x86_update_cr4(env, t0);
3638 break;
3639 case 8:
3640 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3641 cpu_set_apic_tpr(env, t0);
3642 }
3643 env->v_tpr = t0 & 0x0f;
3644 break;
3645 default:
3646 env->cr[reg] = t0;
3647 break;
3648 }
3649}
3650#endif
3651
3652void helper_lmsw(target_ulong t0)
3653{
3654 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3655 if already set to one. */
3656 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3657 helper_write_crN(0, t0);
3658}
3659
3660void helper_clts(void)
3661{
3662 env->cr[0] &= ~CR0_TS_MASK;
3663 env->hflags &= ~HF_TS_MASK;
3664}
3665
3666/* XXX: do more */
3667void helper_movl_drN_T0(int reg, target_ulong t0)
3668{
3669 env->dr[reg] = t0;
3670}
3671
3672void helper_invlpg(target_ulong addr)
3673{
3674 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3675 tlb_flush_page(env, addr);
3676}
3677
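/* RDTSC: with CR4.TSD set the instruction is privileged (CPL 0 only).  The
   64-bit TSC value (plus the SVM tsc_offset) is returned split across
   EDX:EAX. */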
3678void helper_rdtsc(void)
3679{
3680 uint64_t val;
3681
3682 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3683 raise_exception(EXCP0D_GPF);
3684 }
3685 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3686
3687 val = cpu_get_tsc(env) + env->tsc_offset;
3688 EAX = (uint32_t)(val);
3689 EDX = (uint32_t)(val >> 32);
3690}
3691
3692#ifdef VBOX
3693void helper_rdtscp(void)
3694{
3695 uint64_t val;
3696 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3697 raise_exception(EXCP0D_GPF);
3698 }
3699
3700 val = cpu_get_tsc(env);
3701 EAX = (uint32_t)(val);
3702 EDX = (uint32_t)(val >> 32);
3703 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3704 ECX = (uint32_t)(val);
3705 else
3706 ECX = 0;
3707}
3708#endif
3709
3710void helper_rdpmc(void)
3711{
3712#ifdef VBOX
3713 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3714 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3715 raise_exception(EXCP0D_GPF);
3716 }
3717 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3718 EAX = 0;
3719 EDX = 0;
3720#else
3721 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3722 raise_exception(EXCP0D_GPF);
3723 }
3724 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3725
3726 /* currently unimplemented */
3727 raise_exception_err(EXCP06_ILLOP, 0);
3728#endif
3729}
3730
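/* WRMSR/RDMSR: ECX selects the MSR and the 64-bit value travels in EDX:EAX.
   For MSR_EFER only the bits backed by advertised CPUID features (SCE, LME,
   FFXSR, NXE, SVME) may be changed.  In the VBox build the write is
   additionally forwarded to CPUM via cpu_wrmsr(), and MSRs not handled here
   are read through cpu_rdmsr(). */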
3731#if defined(CONFIG_USER_ONLY)
3732void helper_wrmsr(void)
3733{
3734}
3735
3736void helper_rdmsr(void)
3737{
3738}
3739#else
3740void helper_wrmsr(void)
3741{
3742 uint64_t val;
3743
3744 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3745
3746 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3747
3748 switch((uint32_t)ECX) {
3749 case MSR_IA32_SYSENTER_CS:
3750 env->sysenter_cs = val & 0xffff;
3751 break;
3752 case MSR_IA32_SYSENTER_ESP:
3753 env->sysenter_esp = val;
3754 break;
3755 case MSR_IA32_SYSENTER_EIP:
3756 env->sysenter_eip = val;
3757 break;
3758 case MSR_IA32_APICBASE:
3759#ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3760 cpu_set_apic_base(env, val);
3761#endif
3762 break;
3763 case MSR_EFER:
3764 {
3765 uint64_t update_mask;
3766 update_mask = 0;
3767 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3768 update_mask |= MSR_EFER_SCE;
3769 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3770 update_mask |= MSR_EFER_LME;
3771 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3772 update_mask |= MSR_EFER_FFXSR;
3773 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3774 update_mask |= MSR_EFER_NXE;
3775 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3776 update_mask |= MSR_EFER_SVME;
3777 cpu_load_efer(env, (env->efer & ~update_mask) |
3778 (val & update_mask));
3779 }
3780 break;
3781 case MSR_STAR:
3782 env->star = val;
3783 break;
3784 case MSR_PAT:
3785 env->pat = val;
3786 break;
3787 case MSR_VM_HSAVE_PA:
3788 env->vm_hsave = val;
3789 break;
3790#ifdef TARGET_X86_64
3791 case MSR_LSTAR:
3792 env->lstar = val;
3793 break;
3794 case MSR_CSTAR:
3795 env->cstar = val;
3796 break;
3797 case MSR_FMASK:
3798 env->fmask = val;
3799 break;
3800 case MSR_FSBASE:
3801 env->segs[R_FS].base = val;
3802 break;
3803 case MSR_GSBASE:
3804 env->segs[R_GS].base = val;
3805 break;
3806 case MSR_KERNELGSBASE:
3807 env->kernelgsbase = val;
3808 break;
3809#endif
3810 default:
3811#ifndef VBOX
3812 /* XXX: exception ? */
3813#endif
3814 break;
3815 }
3816
3817#ifdef VBOX
3818 /* call CPUM. */
3819 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3820 {
3821 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3822 }
3823#endif
3824}
3825
3826void helper_rdmsr(void)
3827{
3828 uint64_t val;
3829 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3830
3831 switch((uint32_t)ECX) {
3832 case MSR_IA32_SYSENTER_CS:
3833 val = env->sysenter_cs;
3834 break;
3835 case MSR_IA32_SYSENTER_ESP:
3836 val = env->sysenter_esp;
3837 break;
3838 case MSR_IA32_SYSENTER_EIP:
3839 val = env->sysenter_eip;
3840 break;
3841 case MSR_IA32_APICBASE:
3842 val = cpu_get_apic_base(env);
3843 break;
3844 case MSR_EFER:
3845 val = env->efer;
3846 break;
3847 case MSR_STAR:
3848 val = env->star;
3849 break;
3850 case MSR_PAT:
3851 val = env->pat;
3852 break;
3853 case MSR_VM_HSAVE_PA:
3854 val = env->vm_hsave;
3855 break;
3856#ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3857 case MSR_IA32_PERF_STATUS:
3858 /* tsc_increment_by_tick */
3859 val = 1000ULL;
3860 /* CPU multiplier */
3861 val |= ((uint64_t)4ULL << 40);
3862 break;
3863#endif
3864#ifdef TARGET_X86_64
3865 case MSR_LSTAR:
3866 val = env->lstar;
3867 break;
3868 case MSR_CSTAR:
3869 val = env->cstar;
3870 break;
3871 case MSR_FMASK:
3872 val = env->fmask;
3873 break;
3874 case MSR_FSBASE:
3875 val = env->segs[R_FS].base;
3876 break;
3877 case MSR_GSBASE:
3878 val = env->segs[R_GS].base;
3879 break;
3880 case MSR_KERNELGSBASE:
3881 val = env->kernelgsbase;
3882 break;
3883#endif
3884#ifdef USE_KQEMU
3885 case MSR_QPI_COMMBASE:
3886 if (env->kqemu_enabled) {
3887 val = kqemu_comm_base;
3888 } else {
3889 val = 0;
3890 }
3891 break;
3892#endif
3893 default:
3894#ifndef VBOX
3895 /* XXX: exception ? */
3896 val = 0;
3897#else /* VBOX */
3898 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3899 {
3900 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3901 val = 0;
3902 }
3903#endif
3904 break;
3905 }
3906 EAX = (uint32_t)(val);
3907 EDX = (uint32_t)(val >> 32);
3908
3909#ifdef VBOX_STRICT
3910 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
3911 val = 0;
3912 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
3913#endif
3914}
3915#endif
3916
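/* LSL/LAR/VERR/VERW never fault on a bad selector; they report success or
   failure through ZF (kept in CC_SRC here).  LSL returns the expanded segment
   limit, LAR the access-rights dword masked with 0x00f0ff00. */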
3917target_ulong helper_lsl(target_ulong selector1)
3918{
3919 unsigned int limit;
3920 uint32_t e1, e2, eflags, selector;
3921 int rpl, dpl, cpl, type;
3922
3923 selector = selector1 & 0xffff;
3924 eflags = cc_table[CC_OP].compute_all();
3925 if (load_segment(&e1, &e2, selector) != 0)
3926 goto fail;
3927 rpl = selector & 3;
3928 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3929 cpl = env->hflags & HF_CPL_MASK;
3930 if (e2 & DESC_S_MASK) {
3931 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3932 /* conforming */
3933 } else {
3934 if (dpl < cpl || dpl < rpl)
3935 goto fail;
3936 }
3937 } else {
3938 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3939 switch(type) {
3940 case 1:
3941 case 2:
3942 case 3:
3943 case 9:
3944 case 11:
3945 break;
3946 default:
3947 goto fail;
3948 }
3949 if (dpl < cpl || dpl < rpl) {
3950 fail:
3951 CC_SRC = eflags & ~CC_Z;
3952 return 0;
3953 }
3954 }
3955 limit = get_seg_limit(e1, e2);
3956 CC_SRC = eflags | CC_Z;
3957 return limit;
3958}
3959
3960target_ulong helper_lar(target_ulong selector1)
3961{
3962 uint32_t e1, e2, eflags, selector;
3963 int rpl, dpl, cpl, type;
3964
3965 selector = selector1 & 0xffff;
3966 eflags = cc_table[CC_OP].compute_all();
3967 if ((selector & 0xfffc) == 0)
3968 goto fail;
3969 if (load_segment(&e1, &e2, selector) != 0)
3970 goto fail;
3971 rpl = selector & 3;
3972 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3973 cpl = env->hflags & HF_CPL_MASK;
3974 if (e2 & DESC_S_MASK) {
3975 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3976 /* conforming */
3977 } else {
3978 if (dpl < cpl || dpl < rpl)
3979 goto fail;
3980 }
3981 } else {
3982 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
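 /* Compared with LSL, LAR additionally accepts gate descriptors: 16-/32-bit call gates (types 4 and 12) and task gates (type 5). */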
3983 switch(type) {
3984 case 1:
3985 case 2:
3986 case 3:
3987 case 4:
3988 case 5:
3989 case 9:
3990 case 11:
3991 case 12:
3992 break;
3993 default:
3994 goto fail;
3995 }
3996 if (dpl < cpl || dpl < rpl) {
3997 fail:
3998 CC_SRC = eflags & ~CC_Z;
3999 return 0;
4000 }
4001 }
4002 CC_SRC = eflags | CC_Z;
4003 return e2 & 0x00f0ff00;
4004}
4005
4006void helper_verr(target_ulong selector1)
4007{
4008 uint32_t e1, e2, eflags, selector;
4009 int rpl, dpl, cpl;
4010
4011 selector = selector1 & 0xffff;
4012 eflags = cc_table[CC_OP].compute_all();
4013 if ((selector & 0xfffc) == 0)
4014 goto fail;
4015 if (load_segment(&e1, &e2, selector) != 0)
4016 goto fail;
4017 if (!(e2 & DESC_S_MASK))
4018 goto fail;
4019 rpl = selector & 3;
4020 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4021 cpl = env->hflags & HF_CPL_MASK;
4022 if (e2 & DESC_CS_MASK) {
4023 if (!(e2 & DESC_R_MASK))
4024 goto fail;
4025 if (!(e2 & DESC_C_MASK)) {
4026 if (dpl < cpl || dpl < rpl)
4027 goto fail;
4028 }
4029 } else {
4030 if (dpl < cpl || dpl < rpl) {
4031 fail:
4032 CC_SRC = eflags & ~CC_Z;
4033 return;
4034 }
4035 }
4036 CC_SRC = eflags | CC_Z;
4037}
4038
4039void helper_verw(target_ulong selector1)
4040{
4041 uint32_t e1, e2, eflags, selector;
4042 int rpl, dpl, cpl;
4043
4044 selector = selector1 & 0xffff;
4045 eflags = cc_table[CC_OP].compute_all();
4046 if ((selector & 0xfffc) == 0)
4047 goto fail;
4048 if (load_segment(&e1, &e2, selector) != 0)
4049 goto fail;
4050 if (!(e2 & DESC_S_MASK))
4051 goto fail;
4052 rpl = selector & 3;
4053 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4054 cpl = env->hflags & HF_CPL_MASK;
4055 if (e2 & DESC_CS_MASK) {
4056 goto fail;
4057 } else {
4058 if (dpl < cpl || dpl < rpl)
4059 goto fail;
4060 if (!(e2 & DESC_W_MASK)) {
4061 fail:
4062 CC_SRC = eflags & ~CC_Z;
4063 return;
4064 }
4065 }
4066 CC_SRC = eflags | CC_Z;
4067}
4068
4069/* x87 FPU helpers */
4070
4071static void fpu_set_exception(int mask)
4072{
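 /* Record the exception flag(s); if any pending exception is unmasked in the control word, also set the Error Summary and Busy status bits. */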
4073 env->fpus |= mask;
4074 if (env->fpus & (~env->fpuc & FPUC_EM))
4075 env->fpus |= FPUS_SE | FPUS_B;
4076}
4077
4078#ifndef VBOX
4079static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4080#else /* VBOX */
4081DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4082#endif /* VBOX */
4083{
4084 if (b == 0.0)
4085 fpu_set_exception(FPUS_ZE);
4086 return a / b;
4087}
4088
4089void fpu_raise_exception(void)
4090{
4091 if (env->cr[0] & CR0_NE_MASK) {
4092 raise_exception(EXCP10_COPR);
4093 }
4094#if !defined(CONFIG_USER_ONLY)
4095 else {
4096 cpu_set_ferr(env);
4097 }
4098#endif
4099}
4100
4101void helper_flds_FT0(uint32_t val)
4102{
4103 union {
4104 float32 f;
4105 uint32_t i;
4106 } u;
4107 u.i = val;
4108 FT0 = float32_to_floatx(u.f, &env->fp_status);
4109}
4110
4111void helper_fldl_FT0(uint64_t val)
4112{
4113 union {
4114 float64 f;
4115 uint64_t i;
4116 } u;
4117 u.i = val;
4118 FT0 = float64_to_floatx(u.f, &env->fp_status);
4119}
4120
4121void helper_fildl_FT0(int32_t val)
4122{
4123 FT0 = int32_to_floatx(val, &env->fp_status);
4124}
4125
4126void helper_flds_ST0(uint32_t val)
4127{
4128 int new_fpstt;
4129 union {
4130 float32 f;
4131 uint32_t i;
4132 } u;
4133 new_fpstt = (env->fpstt - 1) & 7;
4134 u.i = val;
4135 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4136 env->fpstt = new_fpstt;
4137 env->fptags[new_fpstt] = 0; /* validate stack entry */
4138}
4139
4140void helper_fldl_ST0(uint64_t val)
4141{
4142 int new_fpstt;
4143 union {
4144 float64 f;
4145 uint64_t i;
4146 } u;
4147 new_fpstt = (env->fpstt - 1) & 7;
4148 u.i = val;
4149 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4150 env->fpstt = new_fpstt;
4151 env->fptags[new_fpstt] = 0; /* validate stack entry */
4152}
4153
4154void helper_fildl_ST0(int32_t val)
4155{
4156 int new_fpstt;
4157 new_fpstt = (env->fpstt - 1) & 7;
4158 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4159 env->fpstt = new_fpstt;
4160 env->fptags[new_fpstt] = 0; /* validate stack entry */
4161}
4162
4163void helper_fildll_ST0(int64_t val)
4164{
4165 int new_fpstt;
4166 new_fpstt = (env->fpstt - 1) & 7;
4167 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4168 env->fpstt = new_fpstt;
4169 env->fptags[new_fpstt] = 0; /* validate stack entry */
4170}
4171
4172#ifndef VBOX
4173uint32_t helper_fsts_ST0(void)
4174#else
4175RTCCUINTREG helper_fsts_ST0(void)
4176#endif
4177{
4178 union {
4179 float32 f;
4180 uint32_t i;
4181 } u;
4182 u.f = floatx_to_float32(ST0, &env->fp_status);
4183 return u.i;
4184}
4185
4186uint64_t helper_fstl_ST0(void)
4187{
4188 union {
4189 float64 f;
4190 uint64_t i;
4191 } u;
4192 u.f = floatx_to_float64(ST0, &env->fp_status);
4193 return u.i;
4194}
4195#ifndef VBOX
4196int32_t helper_fist_ST0(void)
4197#else
4198RTCCINTREG helper_fist_ST0(void)
4199#endif
4200{
4201 int32_t val;
4202 val = floatx_to_int32(ST0, &env->fp_status);
4203 if (val != (int16_t)val)
4204 val = -32768;
4205 return val;
4206}
4207
4208#ifndef VBOX
4209int32_t helper_fistl_ST0(void)
4210#else
4211RTCCINTREG helper_fistl_ST0(void)
4212#endif
4213{
4214 int32_t val;
4215 val = floatx_to_int32(ST0, &env->fp_status);
4216 return val;
4217}
4218
4219int64_t helper_fistll_ST0(void)
4220{
4221 int64_t val;
4222 val = floatx_to_int64(ST0, &env->fp_status);
4223 return val;
4224}
4225
4226#ifndef VBOX
4227int32_t helper_fistt_ST0(void)
4228#else
4229RTCCINTREG helper_fistt_ST0(void)
4230#endif
4231{
4232 int32_t val;
4233 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4234 if (val != (int16_t)val)
4235 val = -32768;
4236 return val;
4237}
4238
4239#ifndef VBOX
4240int32_t helper_fisttl_ST0(void)
4241#else
4242RTCCINTREG helper_fisttl_ST0(void)
4243#endif
4244{
4245 int32_t val;
4246 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4247 return val;
4248}
4249
4250int64_t helper_fisttll_ST0(void)
4251{
4252 int64_t val;
4253 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4254 return val;
4255}
4256
4257void helper_fldt_ST0(target_ulong ptr)
4258{
4259 int new_fpstt;
4260 new_fpstt = (env->fpstt - 1) & 7;
4261 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4262 env->fpstt = new_fpstt;
4263 env->fptags[new_fpstt] = 0; /* validate stack entry */
4264}
4265
4266void helper_fstt_ST0(target_ulong ptr)
4267{
4268 helper_fstt(ST0, ptr);
4269}
4270
4271void helper_fpush(void)
4272{
4273 fpush();
4274}
4275
4276void helper_fpop(void)
4277{
4278 fpop();
4279}
4280
4281void helper_fdecstp(void)
4282{
4283 env->fpstt = (env->fpstt - 1) & 7;
4284 env->fpus &= (~0x4700);
4285}
4286
4287void helper_fincstp(void)
4288{
4289 env->fpstt = (env->fpstt + 1) & 7;
4290 env->fpus &= (~0x4700);
4291}
4292
4293/* FPU move */
4294
4295void helper_ffree_STN(int st_index)
4296{
4297 env->fptags[(env->fpstt + st_index) & 7] = 1;
4298}
4299
4300void helper_fmov_ST0_FT0(void)
4301{
4302 ST0 = FT0;
4303}
4304
4305void helper_fmov_FT0_STN(int st_index)
4306{
4307 FT0 = ST(st_index);
4308}
4309
4310void helper_fmov_ST0_STN(int st_index)
4311{
4312 ST0 = ST(st_index);
4313}
4314
4315void helper_fmov_STN_ST0(int st_index)
4316{
4317 ST(st_index) = ST0;
4318}
4319
4320void helper_fxchg_ST0_STN(int st_index)
4321{
4322 CPU86_LDouble tmp;
4323 tmp = ST(st_index);
4324 ST(st_index) = ST0;
4325 ST0 = tmp;
4326}
4327
4328/* FPU operations */
4329
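/* x87 status-word condition codes for fcom, indexed by comparison result + 1: less -> C0, equal -> C3, greater -> none, unordered -> C3|C2|C0. */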
4330static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4331
4332void helper_fcom_ST0_FT0(void)
4333{
4334 int ret;
4335
4336 ret = floatx_compare(ST0, FT0, &env->fp_status);
4337 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4338 FORCE_RET();
4339}
4340
4341void helper_fucom_ST0_FT0(void)
4342{
4343 int ret;
4344
4345 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4346 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4347 FORCE_RET();
4348}
4349
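/* EFLAGS condition codes for fcomi, same indexing: less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */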
4350static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4351
4352void helper_fcomi_ST0_FT0(void)
4353{
4354 int eflags;
4355 int ret;
4356
4357 ret = floatx_compare(ST0, FT0, &env->fp_status);
4358 eflags = cc_table[CC_OP].compute_all();
4359 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4360 CC_SRC = eflags;
4361 FORCE_RET();
4362}
4363
4364void helper_fucomi_ST0_FT0(void)
4365{
4366 int eflags;
4367 int ret;
4368
4369 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4370 eflags = cc_table[CC_OP].compute_all();
4371 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4372 CC_SRC = eflags;
4373 FORCE_RET();
4374}
4375
4376void helper_fadd_ST0_FT0(void)
4377{
4378 ST0 += FT0;
4379}
4380
4381void helper_fmul_ST0_FT0(void)
4382{
4383 ST0 *= FT0;
4384}
4385
4386void helper_fsub_ST0_FT0(void)
4387{
4388 ST0 -= FT0;
4389}
4390
4391void helper_fsubr_ST0_FT0(void)
4392{
4393 ST0 = FT0 - ST0;
4394}
4395
4396void helper_fdiv_ST0_FT0(void)
4397{
4398 ST0 = helper_fdiv(ST0, FT0);
4399}
4400
4401void helper_fdivr_ST0_FT0(void)
4402{
4403 ST0 = helper_fdiv(FT0, ST0);
4404}
4405
4406/* fp operations between STN and ST0 */
4407
4408void helper_fadd_STN_ST0(int st_index)
4409{
4410 ST(st_index) += ST0;
4411}
4412
4413void helper_fmul_STN_ST0(int st_index)
4414{
4415 ST(st_index) *= ST0;
4416}
4417
4418void helper_fsub_STN_ST0(int st_index)
4419{
4420 ST(st_index) -= ST0;
4421}
4422
4423void helper_fsubr_STN_ST0(int st_index)
4424{
4425 CPU86_LDouble *p;
4426 p = &ST(st_index);
4427 *p = ST0 - *p;
4428}
4429
4430void helper_fdiv_STN_ST0(int st_index)
4431{
4432 CPU86_LDouble *p;
4433 p = &ST(st_index);
4434 *p = helper_fdiv(*p, ST0);
4435}
4436
4437void helper_fdivr_STN_ST0(int st_index)
4438{
4439 CPU86_LDouble *p;
4440 p = &ST(st_index);
4441 *p = helper_fdiv(ST0, *p);
4442}
4443
4444/* misc FPU operations */
4445void helper_fchs_ST0(void)
4446{
4447 ST0 = floatx_chs(ST0);
4448}
4449
4450void helper_fabs_ST0(void)
4451{
4452 ST0 = floatx_abs(ST0);
4453}
4454
4455void helper_fld1_ST0(void)
4456{
4457 ST0 = f15rk[1];
4458}
4459
4460void helper_fldl2t_ST0(void)
4461{
4462 ST0 = f15rk[6];
4463}
4464
4465void helper_fldl2e_ST0(void)
4466{
4467 ST0 = f15rk[5];
4468}
4469
4470void helper_fldpi_ST0(void)
4471{
4472 ST0 = f15rk[2];
4473}
4474
4475void helper_fldlg2_ST0(void)
4476{
4477 ST0 = f15rk[3];
4478}
4479
4480void helper_fldln2_ST0(void)
4481{
4482 ST0 = f15rk[4];
4483}
4484
4485void helper_fldz_ST0(void)
4486{
4487 ST0 = f15rk[0];
4488}
4489
4490void helper_fldz_FT0(void)
4491{
4492 FT0 = f15rk[0];
4493}
4494
4495#ifndef VBOX
4496uint32_t helper_fnstsw(void)
4497#else
4498RTCCUINTREG helper_fnstsw(void)
4499#endif
4500{
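 /* Compose the status word: clear the TOP field (bits 11-13) and insert the current stack-top pointer. */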
4501 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4502}
4503
4504#ifndef VBOX
4505uint32_t helper_fnstcw(void)
4506#else
4507RTCCUINTREG helper_fnstcw(void)
4508#endif
4509{
4510 return env->fpuc;
4511}
4512
4513static void update_fp_status(void)
4514{
4515 int rnd_type;
4516
4517 /* set rounding mode */
4518 switch(env->fpuc & RC_MASK) {
4519 default:
4520 case RC_NEAR:
4521 rnd_type = float_round_nearest_even;
4522 break;
4523 case RC_DOWN:
4524 rnd_type = float_round_down;
4525 break;
4526 case RC_UP:
4527 rnd_type = float_round_up;
4528 break;
4529 case RC_CHOP:
4530 rnd_type = float_round_to_zero;
4531 break;
4532 }
4533 set_float_rounding_mode(rnd_type, &env->fp_status);
4534#ifdef FLOATX80
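 /* Map the precision-control field (control word bits 8-9) to the x87 rounding precision: 0 -> 32 bits, 2 -> 64 bits, 3 -> 80 bits (1 is reserved). */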
4535 switch((env->fpuc >> 8) & 3) {
4536 case 0:
4537 rnd_type = 32;
4538 break;
4539 case 2:
4540 rnd_type = 64;
4541 break;
4542 case 3:
4543 default:
4544 rnd_type = 80;
4545 break;
4546 }
4547 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4548#endif
4549}
4550
4551void helper_fldcw(uint32_t val)
4552{
4553 env->fpuc = val;
4554 update_fp_status();
4555}
4556
4557void helper_fclex(void)
4558{
4559 env->fpus &= 0x7f00;
4560}
4561
4562void helper_fwait(void)
4563{
4564 if (env->fpus & FPUS_SE)
4565 fpu_raise_exception();
4566 FORCE_RET();
4567}
4568
4569void helper_fninit(void)
4570{
4571 env->fpus = 0;
4572 env->fpstt = 0;
4573 env->fpuc = 0x37f;
4574 env->fptags[0] = 1;
4575 env->fptags[1] = 1;
4576 env->fptags[2] = 1;
4577 env->fptags[3] = 1;
4578 env->fptags[4] = 1;
4579 env->fptags[5] = 1;
4580 env->fptags[6] = 1;
4581 env->fptags[7] = 1;
4582}
4583
4584/* BCD ops */
4585
4586void helper_fbld_ST0(target_ulong ptr)
4587{
4588 CPU86_LDouble tmp;
4589 uint64_t val;
4590 unsigned int v;
4591 int i;
4592
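 /* Load an 18-digit packed BCD value: bytes 0-8 hold two decimal digits each (least significant byte first), bit 7 of byte 9 is the sign. */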
4593 val = 0;
4594 for(i = 8; i >= 0; i--) {
4595 v = ldub(ptr + i);
4596 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4597 }
4598 tmp = val;
4599 if (ldub(ptr + 9) & 0x80)
4600 tmp = -tmp;
4601 fpush();
4602 ST0 = tmp;
4603}
4604
4605void helper_fbst_ST0(target_ulong ptr)
4606{
4607 int v;
4608 target_ulong mem_ref, mem_end;
4609 int64_t val;
4610
4611 val = floatx_to_int64(ST0, &env->fp_status);
4612 mem_ref = ptr;
4613 mem_end = mem_ref + 9;
4614 if (val < 0) {
4615 stb(mem_end, 0x80);
4616 val = -val;
4617 } else {
4618 stb(mem_end, 0x00);
4619 }
4620 while (mem_ref < mem_end) {
4621 if (val == 0)
4622 break;
4623 v = val % 100;
4624 val = val / 100;
4625 v = ((v / 10) << 4) | (v % 10);
4626 stb(mem_ref++, v);
4627 }
4628 while (mem_ref < mem_end) {
4629 stb(mem_ref++, 0);
4630 }
4631}
4632
4633void helper_f2xm1(void)
4634{
4635 ST0 = pow(2.0,ST0) - 1.0;
4636}
4637
4638void helper_fyl2x(void)
4639{
4640 CPU86_LDouble fptemp;
4641
4642 fptemp = ST0;
4643 if (fptemp>0.0){
4644 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4645 ST1 *= fptemp;
4646 fpop();
4647 } else {
4648 env->fpus &= (~0x4700);
4649 env->fpus |= 0x400;
4650 }
4651}
4652
4653void helper_fptan(void)
4654{
4655 CPU86_LDouble fptemp;
4656
4657 fptemp = ST0;
4658 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4659 env->fpus |= 0x400;
4660 } else {
4661 ST0 = tan(fptemp);
4662 fpush();
4663 ST0 = 1.0;
4664 env->fpus &= (~0x400); /* C2 <-- 0 */
4665 /* the above code is for |arg| < 2**52 only */
4666 }
4667}
4668
4669void helper_fpatan(void)
4670{
4671 CPU86_LDouble fptemp, fpsrcop;
4672
4673 fpsrcop = ST1;
4674 fptemp = ST0;
4675 ST1 = atan2(fpsrcop,fptemp);
4676 fpop();
4677}
4678
4679void helper_fxtract(void)
4680{
4681 CPU86_LDoubleU temp;
4682 unsigned int expdif;
4683
4684 temp.d = ST0;
4685 expdif = EXPD(temp) - EXPBIAS;
4686 /* DP exponent bias */
4687 ST0 = expdif;
4688 fpush();
4689 BIASEXPONENT(temp);
4690 ST0 = temp.d;
4691}
4692
4693#ifdef VBOX
4694#ifdef _MSC_VER
4695/* MSC cannot divide by zero */
4696extern double _Nan;
4697#define NaN _Nan
4698#else
4699#define NaN (0.0 / 0.0)
4700#endif
4701#endif /* VBOX */
4702
4703void helper_fprem1(void)
4704{
4705 CPU86_LDouble dblq, fpsrcop, fptemp;
4706 CPU86_LDoubleU fpsrcop1, fptemp1;
4707 int expdif;
4708 signed long long int q;
4709
4710#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4711 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4712#else
4713 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4714#endif
4715 ST0 = 0.0 / 0.0; /* NaN */
4716 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4717 return;
4718 }
4719
4720 fpsrcop = ST0;
4721 fptemp = ST1;
4722 fpsrcop1.d = fpsrcop;
4723 fptemp1.d = fptemp;
4724 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4725
4726 if (expdif < 0) {
4727 /* optimisation? taken from the AMD docs */
4728 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4729 /* ST0 is unchanged */
4730 return;
4731 }
4732
4733 if (expdif < 53) {
4734 dblq = fpsrcop / fptemp;
4735 /* round dblq towards nearest integer */
4736 dblq = rint(dblq);
4737 ST0 = fpsrcop - fptemp * dblq;
4738
4739 /* convert dblq to q by truncating towards zero */
4740 if (dblq < 0.0)
4741 q = (signed long long int)(-dblq);
4742 else
4743 q = (signed long long int)dblq;
4744
4745 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4746 /* (C0,C3,C1) <-- (q2,q1,q0) */
4747 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4748 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4749 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4750 } else {
4751 env->fpus |= 0x400; /* C2 <-- 1 */
4752 fptemp = pow(2.0, expdif - 50);
4753 fpsrcop = (ST0 / ST1) / fptemp;
4754 /* fpsrcop = integer obtained by chopping */
4755 fpsrcop = (fpsrcop < 0.0) ?
4756 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4757 ST0 -= (ST1 * fpsrcop * fptemp);
4758 }
4759}
4760
4761void helper_fprem(void)
4762{
4763 CPU86_LDouble dblq, fpsrcop, fptemp;
4764 CPU86_LDoubleU fpsrcop1, fptemp1;
4765 int expdif;
4766 signed long long int q;
4767
4768#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4769 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4770#else
4771 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4772#endif
4773 ST0 = 0.0 / 0.0; /* NaN */
4774 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4775 return;
4776 }
4777
4778 fpsrcop = (CPU86_LDouble)ST0;
4779 fptemp = (CPU86_LDouble)ST1;
4780 fpsrcop1.d = fpsrcop;
4781 fptemp1.d = fptemp;
4782 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4783
4784 if (expdif < 0) {
4785 /* optimisation? taken from the AMD docs */
4786 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4787 /* ST0 is unchanged */
4788 return;
4789 }
4790
4791 if ( expdif < 53 ) {
4792 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4793 /* round dblq towards zero */
4794 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4795 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4796
4797 /* convert dblq to q by truncating towards zero */
4798 if (dblq < 0.0)
4799 q = (signed long long int)(-dblq);
4800 else
4801 q = (signed long long int)dblq;
4802
4803 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4804 /* (C0,C3,C1) <-- (q2,q1,q0) */
4805 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4806 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4807 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4808 } else {
4809 int N = 32 + (expdif % 32); /* as per AMD docs */
4810 env->fpus |= 0x400; /* C2 <-- 1 */
4811 fptemp = pow(2.0, (double)(expdif - N));
4812 fpsrcop = (ST0 / ST1) / fptemp;
4813 /* fpsrcop = integer obtained by chopping */
4814 fpsrcop = (fpsrcop < 0.0) ?
4815 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4816 ST0 -= (ST1 * fpsrcop * fptemp);
4817 }
4818}
4819
4820void helper_fyl2xp1(void)
4821{
4822 CPU86_LDouble fptemp;
4823
4824 fptemp = ST0;
4825 if ((fptemp+1.0)>0.0) {
4826 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4827 ST1 *= fptemp;
4828 fpop();
4829 } else {
4830 env->fpus &= (~0x4700);
4831 env->fpus |= 0x400;
4832 }
4833}
4834
4835void helper_fsqrt(void)
4836{
4837 CPU86_LDouble fptemp;
4838
4839 fptemp = ST0;
4840 if (fptemp<0.0) {
4841 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4842 env->fpus |= 0x400;
4843 }
4844 ST0 = sqrt(fptemp);
4845}
4846
4847void helper_fsincos(void)
4848{
4849 CPU86_LDouble fptemp;
4850
4851 fptemp = ST0;
4852 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4853 env->fpus |= 0x400;
4854 } else {
4855 ST0 = sin(fptemp);
4856 fpush();
4857 ST0 = cos(fptemp);
4858 env->fpus &= (~0x400); /* C2 <-- 0 */
4859 /* the above code is for |arg| < 2**63 only */
4860 }
4861}
4862
4863void helper_frndint(void)
4864{
4865 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4866}
4867
4868void helper_fscale(void)
4869{
4870 ST0 = ldexp (ST0, (int)(ST1));
4871}
4872
4873void helper_fsin(void)
4874{
4875 CPU86_LDouble fptemp;
4876
4877 fptemp = ST0;
4878 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4879 env->fpus |= 0x400;
4880 } else {
4881 ST0 = sin(fptemp);
4882 env->fpus &= (~0x400); /* C2 <-- 0 */
4883 /* the above code is for |arg| < 2**53 only */
4884 }
4885}
4886
4887void helper_fcos(void)
4888{
4889 CPU86_LDouble fptemp;
4890
4891 fptemp = ST0;
4892 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4893 env->fpus |= 0x400;
4894 } else {
4895 ST0 = cos(fptemp);
4896 env->fpus &= (~0x400); /* C2 <-- 0 */
4897 /* the above code is for |arg| < 2**63 only */
4898 }
4899}
4900
4901void helper_fxam_ST0(void)
4902{
4903 CPU86_LDoubleU temp;
4904 int expdif;
4905
4906 temp.d = ST0;
4907
4908 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4909 if (SIGND(temp))
4910 env->fpus |= 0x200; /* C1 <-- 1 */
4911
4912 /* XXX: test fptags too */
4913 expdif = EXPD(temp);
4914 if (expdif == MAXEXPD) {
4915#ifdef USE_X86LDOUBLE
4916 if (MANTD(temp) == 0x8000000000000000ULL)
4917#else
4918 if (MANTD(temp) == 0)
4919#endif
4920 env->fpus |= 0x500 /*Infinity*/;
4921 else
4922 env->fpus |= 0x100 /*NaN*/;
4923 } else if (expdif == 0) {
4924 if (MANTD(temp) == 0)
4925 env->fpus |= 0x4000 /*Zero*/;
4926 else
4927 env->fpus |= 0x4400 /*Denormal*/;
4928 } else {
4929 env->fpus |= 0x400;
4930 }
4931}
4932
4933void helper_fstenv(target_ulong ptr, int data32)
4934{
4935 int fpus, fptag, exp, i;
4936 uint64_t mant;
4937 CPU86_LDoubleU tmp;
4938
4939 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4940 fptag = 0;
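 /* Build the 16-bit tag word, 2 bits per register: 00 = valid, 01 = zero, 10 = special (NaN/infinity/denormal), 11 = empty. */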
4941 for (i=7; i>=0; i--) {
4942 fptag <<= 2;
4943 if (env->fptags[i]) {
4944 fptag |= 3;
4945 } else {
4946 tmp.d = env->fpregs[i].d;
4947 exp = EXPD(tmp);
4948 mant = MANTD(tmp);
4949 if (exp == 0 && mant == 0) {
4950 /* zero */
4951 fptag |= 1;
4952 } else if (exp == 0 || exp == MAXEXPD
4953#ifdef USE_X86LDOUBLE
4954 || (mant & (1LL << 63)) == 0
4955#endif
4956 ) {
4957 /* NaNs, infinity, denormal */
4958 fptag |= 2;
4959 }
4960 }
4961 }
4962 if (data32) {
4963 /* 32 bit */
4964 stl(ptr, env->fpuc);
4965 stl(ptr + 4, fpus);
4966 stl(ptr + 8, fptag);
4967 stl(ptr + 12, 0); /* fpip */
4968 stl(ptr + 16, 0); /* fpcs */
4969 stl(ptr + 20, 0); /* fpoo */
4970 stl(ptr + 24, 0); /* fpos */
4971 } else {
4972 /* 16 bit */
4973 stw(ptr, env->fpuc);
4974 stw(ptr + 2, fpus);
4975 stw(ptr + 4, fptag);
4976 stw(ptr + 6, 0);
4977 stw(ptr + 8, 0);
4978 stw(ptr + 10, 0);
4979 stw(ptr + 12, 0);
4980 }
4981}
4982
4983void helper_fldenv(target_ulong ptr, int data32)
4984{
4985 int i, fpus, fptag;
4986
4987 if (data32) {
4988 env->fpuc = lduw(ptr);
4989 fpus = lduw(ptr + 4);
4990 fptag = lduw(ptr + 8);
4991 }
4992 else {
4993 env->fpuc = lduw(ptr);
4994 fpus = lduw(ptr + 2);
4995 fptag = lduw(ptr + 4);
4996 }
4997 env->fpstt = (fpus >> 11) & 7;
4998 env->fpus = fpus & ~0x3800;
4999 for(i = 0;i < 8; i++) {
5000 env->fptags[i] = ((fptag & 3) == 3);
5001 fptag >>= 2;
5002 }
5003}
5004
5005void helper_fsave(target_ulong ptr, int data32)
5006{
5007 CPU86_LDouble tmp;
5008 int i;
5009
5010 helper_fstenv(ptr, data32);
5011
5012 ptr += (14 << data32);
5013 for(i = 0;i < 8; i++) {
5014 tmp = ST(i);
5015 helper_fstt(tmp, ptr);
5016 ptr += 10;
5017 }
5018
5019 /* fninit */
5020 env->fpus = 0;
5021 env->fpstt = 0;
5022 env->fpuc = 0x37f;
5023 env->fptags[0] = 1;
5024 env->fptags[1] = 1;
5025 env->fptags[2] = 1;
5026 env->fptags[3] = 1;
5027 env->fptags[4] = 1;
5028 env->fptags[5] = 1;
5029 env->fptags[6] = 1;
5030 env->fptags[7] = 1;
5031}
5032
5033void helper_frstor(target_ulong ptr, int data32)
5034{
5035 CPU86_LDouble tmp;
5036 int i;
5037
5038 helper_fldenv(ptr, data32);
5039 ptr += (14 << data32);
5040
5041 for(i = 0;i < 8; i++) {
5042 tmp = helper_fldt(ptr);
5043 ST(i) = tmp;
5044 ptr += 10;
5045 }
5046}
5047
5048void helper_fxsave(target_ulong ptr, int data64)
5049{
5050 int fpus, fptag, i, nb_xmm_regs;
5051 CPU86_LDouble tmp;
5052 target_ulong addr;
5053
5054 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5055 fptag = 0;
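 /* FXSAVE uses an abridged 8-bit tag word, one bit per register set when the register is valid; fptags[] stores 1 for empty, hence the ^ 0xff below. */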
5056 for(i = 0; i < 8; i++) {
5057 fptag |= (env->fptags[i] << i);
5058 }
5059 stw(ptr, env->fpuc);
5060 stw(ptr + 2, fpus);
5061 stw(ptr + 4, fptag ^ 0xff);
5062#ifdef TARGET_X86_64
5063 if (data64) {
5064 stq(ptr + 0x08, 0); /* rip */
5065 stq(ptr + 0x10, 0); /* rdp */
5066 } else
5067#endif
5068 {
5069 stl(ptr + 0x08, 0); /* eip */
5070 stl(ptr + 0x0c, 0); /* sel */
5071 stl(ptr + 0x10, 0); /* dp */
5072 stl(ptr + 0x14, 0); /* sel */
5073 }
5074
5075 addr = ptr + 0x20;
5076 for(i = 0;i < 8; i++) {
5077 tmp = ST(i);
5078 helper_fstt(tmp, addr);
5079 addr += 16;
5080 }
5081
5082 if (env->cr[4] & CR4_OSFXSR_MASK) {
5083 /* XXX: finish it */
5084 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5085 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5086 if (env->hflags & HF_CS64_MASK)
5087 nb_xmm_regs = 16;
5088 else
5089 nb_xmm_regs = 8;
5090 addr = ptr + 0xa0;
5091 for(i = 0; i < nb_xmm_regs; i++) {
5092 stq(addr, env->xmm_regs[i].XMM_Q(0));
5093 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5094 addr += 16;
5095 }
5096 }
5097}
5098
5099void helper_fxrstor(target_ulong ptr, int data64)
5100{
5101 int i, fpus, fptag, nb_xmm_regs;
5102 CPU86_LDouble tmp;
5103 target_ulong addr;
5104
5105 env->fpuc = lduw(ptr);
5106 fpus = lduw(ptr + 2);
5107 fptag = lduw(ptr + 4);
5108 env->fpstt = (fpus >> 11) & 7;
5109 env->fpus = fpus & ~0x3800;
5110 fptag ^= 0xff;
5111 for(i = 0;i < 8; i++) {
5112 env->fptags[i] = ((fptag >> i) & 1);
5113 }
5114
5115 addr = ptr + 0x20;
5116 for(i = 0;i < 8; i++) {
5117 tmp = helper_fldt(addr);
5118 ST(i) = tmp;
5119 addr += 16;
5120 }
5121
5122 if (env->cr[4] & CR4_OSFXSR_MASK) {
5123 /* XXX: finish it */
5124 env->mxcsr = ldl(ptr + 0x18);
5125 //ldl(ptr + 0x1c);
5126 if (env->hflags & HF_CS64_MASK)
5127 nb_xmm_regs = 16;
5128 else
5129 nb_xmm_regs = 8;
5130 addr = ptr + 0xa0;
5131 for(i = 0; i < nb_xmm_regs; i++) {
5132#if !defined(VBOX) || __GNUC__ < 4
5133 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5134 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5135#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5136# if 1
5137 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5138 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5139 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5140 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5141# else
5142 /* this works fine on Mac OS X, gcc 4.0.1 */
5143 uint64_t u64 = ldq(addr);
5144 env->xmm_regs[i].XMM_Q(0) = u64;
5145 u64 = ldq(addr + 8);
5146 env->xmm_regs[i].XMM_Q(1) = u64;
5147# endif
5148#endif
5149 addr += 16;
5150 }
5151 }
5152}
5153
5154#ifndef USE_X86LDOUBLE
5155
5156void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5157{
5158 CPU86_LDoubleU temp;
5159 int e;
5160
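 /* Re-encode the IEEE double as an 80-bit extended value: widen the 52-bit mantissa to 63 bits with an explicit integer bit and rebias the exponent to the extended format's bias of 16383. */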
5161 temp.d = f;
5162 /* mantissa */
5163 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5164 /* exponent + sign */
5165 e = EXPD(temp) - EXPBIAS + 16383;
5166 e |= SIGND(temp) >> 16;
5167 *pexp = e;
5168}
5169
5170CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5171{
5172 CPU86_LDoubleU temp;
5173 int e;
5174 uint64_t ll;
5175
5176 /* XXX: handle overflow ? */
5177 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5178 e |= (upper >> 4) & 0x800; /* sign */
5179 ll = (mant >> 11) & ((1LL << 52) - 1);
5180#ifdef __arm__
5181 temp.l.upper = (e << 20) | (ll >> 32);
5182 temp.l.lower = ll;
5183#else
5184 temp.ll = ll | ((uint64_t)e << 52);
5185#endif
5186 return temp.d;
5187}
5188
5189#else
5190
5191void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5192{
5193 CPU86_LDoubleU temp;
5194
5195 temp.d = f;
5196 *pmant = temp.l.lower;
5197 *pexp = temp.l.upper;
5198}
5199
5200CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5201{
5202 CPU86_LDoubleU temp;
5203
5204 temp.l.upper = upper;
5205 temp.l.lower = mant;
5206 return temp.d;
5207}
5208#endif
5209
5210#ifdef TARGET_X86_64
5211
5212//#define DEBUG_MULDIV
5213
5214static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5215{
5216 *plow += a;
5217 /* carry test */
5218 if (*plow < a)
5219 (*phigh)++;
5220 *phigh += b;
5221}
5222
5223static void neg128(uint64_t *plow, uint64_t *phigh)
5224{
5225 *plow = ~ *plow;
5226 *phigh = ~ *phigh;
5227 add128(plow, phigh, 1, 0);
5228}
5229
5230/* return TRUE if overflow */
5231static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5232{
5233 uint64_t q, r, a1, a0;
5234 int i, qb, ab;
5235
5236 a0 = *plow;
5237 a1 = *phigh;
5238 if (a1 == 0) {
5239 q = a0 / b;
5240 r = a0 % b;
5241 *plow = q;
5242 *phigh = r;
5243 } else {
5244 if (a1 >= b)
5245 return 1;
5246 /* XXX: use a better algorithm */
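 /* Simple restoring division: shift the 128-bit dividend left one bit at a time, subtracting the divisor from the high half whenever possible; quotient bits accumulate in a0 and the remainder ends up in a1. */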
5247 for(i = 0; i < 64; i++) {
5248 ab = a1 >> 63;
5249 a1 = (a1 << 1) | (a0 >> 63);
5250 if (ab || a1 >= b) {
5251 a1 -= b;
5252 qb = 1;
5253 } else {
5254 qb = 0;
5255 }
5256 a0 = (a0 << 1) | qb;
5257 }
5258#if defined(DEBUG_MULDIV)
5259 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5260 *phigh, *plow, b, a0, a1);
5261#endif
5262 *plow = a0;
5263 *phigh = a1;
5264 }
5265 return 0;
5266}
5267
5268/* return TRUE if overflow */
5269static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5270{
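 /* Signed 128/64 division built on div64: make both operands non-negative, divide, then restore the signs of quotient and remainder, failing if the quotient does not fit in a signed 64-bit value. */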
5271 int sa, sb;
5272 sa = ((int64_t)*phigh < 0);
5273 if (sa)
5274 neg128(plow, phigh);
5275 sb = (b < 0);
5276 if (sb)
5277 b = -b;
5278 if (div64(plow, phigh, b) != 0)
5279 return 1;
5280 if (sa ^ sb) {
5281 if (*plow > (1ULL << 63))
5282 return 1;
5283 *plow = - *plow;
5284 } else {
5285 if (*plow >= (1ULL << 63))
5286 return 1;
5287 }
5288 if (sa)
5289 *phigh = - *phigh;
5290 return 0;
5291}
5292
5293void helper_mulq_EAX_T0(target_ulong t0)
5294{
5295 uint64_t r0, r1;
5296
5297 mulu64(&r0, &r1, EAX, t0);
5298 EAX = r0;
5299 EDX = r1;
5300 CC_DST = r0;
5301 CC_SRC = r1;
5302}
5303
5304void helper_imulq_EAX_T0(target_ulong t0)
5305{
5306 uint64_t r0, r1;
5307
5308 muls64(&r0, &r1, EAX, t0);
5309 EAX = r0;
5310 EDX = r1;
5311 CC_DST = r0;
5312 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5313}
5314
5315target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5316{
5317 uint64_t r0, r1;
5318
5319 muls64(&r0, &r1, t0, t1);
5320 CC_DST = r0;
5321 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5322 return r0;
5323}
5324
5325void helper_divq_EAX(target_ulong t0)
5326{
5327 uint64_t r0, r1;
5328 if (t0 == 0) {
5329 raise_exception(EXCP00_DIVZ);
5330 }
5331 r0 = EAX;
5332 r1 = EDX;
5333 if (div64(&r0, &r1, t0))
5334 raise_exception(EXCP00_DIVZ);
5335 EAX = r0;
5336 EDX = r1;
5337}
5338
5339void helper_idivq_EAX(target_ulong t0)
5340{
5341 uint64_t r0, r1;
5342 if (t0 == 0) {
5343 raise_exception(EXCP00_DIVZ);
5344 }
5345 r0 = EAX;
5346 r1 = EDX;
5347 if (idiv64(&r0, &r1, t0))
5348 raise_exception(EXCP00_DIVZ);
5349 EAX = r0;
5350 EDX = r1;
5351}
5352#endif
5353
5354static void do_hlt(void)
5355{
5356 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5357 env->halted = 1;
5358 env->exception_index = EXCP_HLT;
5359 cpu_loop_exit();
5360}
5361
5362void helper_hlt(int next_eip_addend)
5363{
5364 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5365 EIP += next_eip_addend;
5366
5367 do_hlt();
5368}
5369
5370void helper_monitor(target_ulong ptr)
5371{
5372#ifdef VBOX
5373 if ((uint32_t)ECX > 1)
5374 raise_exception(EXCP0D_GPF);
5375#else
5376 if ((uint32_t)ECX != 0)
5377 raise_exception(EXCP0D_GPF);
5378#endif
5379 /* XXX: store address ? */
5380 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5381}
5382
5383void helper_mwait(int next_eip_addend)
5384{
5385 if ((uint32_t)ECX != 0)
5386 raise_exception(EXCP0D_GPF);
5387#ifdef VBOX
5388 helper_hlt(next_eip_addend);
5389#else
5390 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5391 EIP += next_eip_addend;
5392
5393 /* XXX: not complete but not completely erroneous */
5394 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5395 /* more than one CPU: do not sleep because another CPU may
5396 wake this one */
5397 } else {
5398 do_hlt();
5399 }
5400#endif
5401}
5402
5403void helper_debug(void)
5404{
5405 env->exception_index = EXCP_DEBUG;
5406 cpu_loop_exit();
5407}
5408
5409void helper_raise_interrupt(int intno, int next_eip_addend)
5410{
5411 raise_interrupt(intno, 1, 0, next_eip_addend);
5412}
5413
5414void helper_raise_exception(int exception_index)
5415{
5416 raise_exception(exception_index);
5417}
5418
5419void helper_cli(void)
5420{
5421 env->eflags &= ~IF_MASK;
5422}
5423
5424void helper_sti(void)
5425{
5426 env->eflags |= IF_MASK;
5427}
5428
5429#ifdef VBOX
5430void helper_cli_vme(void)
5431{
5432 env->eflags &= ~VIF_MASK;
5433}
5434
5435void helper_sti_vme(void)
5436{
5437 /* First check, then change eflags according to the AMD manual */
5438 if (env->eflags & VIP_MASK) {
5439 raise_exception(EXCP0D_GPF);
5440 }
5441 env->eflags |= VIF_MASK;
5442}
5443#endif
5444
5445#if 0
5446/* vm86plus instructions */
5447void helper_cli_vm(void)
5448{
5449 env->eflags &= ~VIF_MASK;
5450}
5451
5452void helper_sti_vm(void)
5453{
5454 env->eflags |= VIF_MASK;
5455 if (env->eflags & VIP_MASK) {
5456 raise_exception(EXCP0D_GPF);
5457 }
5458}
5459#endif
5460
5461void helper_set_inhibit_irq(void)
5462{
5463 env->hflags |= HF_INHIBIT_IRQ_MASK;
5464}
5465
5466void helper_reset_inhibit_irq(void)
5467{
5468 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5469}
5470
5471void helper_boundw(target_ulong a0, int v)
5472{
5473 int low, high;
5474 low = ldsw(a0);
5475 high = ldsw(a0 + 2);
5476 v = (int16_t)v;
5477 if (v < low || v > high) {
5478 raise_exception(EXCP05_BOUND);
5479 }
5480 FORCE_RET();
5481}
5482
5483void helper_boundl(target_ulong a0, int v)
5484{
5485 int low, high;
5486 low = ldl(a0);
5487 high = ldl(a0 + 4);
5488 if (v < low || v > high) {
5489 raise_exception(EXCP05_BOUND);
5490 }
5491 FORCE_RET();
5492}
5493
5494static float approx_rsqrt(float a)
5495{
5496 return 1.0 / sqrt(a);
5497}
5498
5499static float approx_rcp(float a)
5500{
5501 return 1.0 / a;
5502}
5503
5504#if !defined(CONFIG_USER_ONLY)
5505
5506#define MMUSUFFIX _mmu
5507
5508#define SHIFT 0
5509#include "softmmu_template.h"
5510
5511#define SHIFT 1
5512#include "softmmu_template.h"
5513
5514#define SHIFT 2
5515#include "softmmu_template.h"
5516
5517#define SHIFT 3
5518#include "softmmu_template.h"
5519
5520#endif
5521
5522#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5523/* This code assumes a real physical address always fits into a host CPU reg,
5524 which is wrong in general, but true for our current use cases. */
5525RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5526{
5527 return remR3PhysReadS8(addr);
5528}
5529RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5530{
5531 return remR3PhysReadU8(addr);
5532}
5533void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5534{
5535 remR3PhysWriteU8(addr, val);
5536}
5537RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5538{
5539 return remR3PhysReadS16(addr);
5540}
5541RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5542{
5543 return remR3PhysReadU16(addr);
5544}
5545void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5546{
5547 remR3PhysWriteU16(addr, val);
5548}
5549RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5550{
5551 return remR3PhysReadS32(addr);
5552}
5553RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5554{
5555 return remR3PhysReadU32(addr);
5556}
5557void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5558{
5559 remR3PhysWriteU32(addr, val);
5560}
5561uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5562{
5563 return remR3PhysReadU64(addr);
5564}
5565void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5566{
5567 remR3PhysWriteU64(addr, val);
5568}
5569#endif
5570
5571/* try to fill the TLB and raise an exception on error. If retaddr is
5572 NULL, it means that the function was called from C code (i.e. not
5573 from generated code or from helper.c) */
5574/* XXX: fix it to restore all registers */
5575void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5576{
5577 TranslationBlock *tb;
5578 int ret;
5579 unsigned long pc;
5580 CPUX86State *saved_env;
5581
5582 /* XXX: hack to restore env in all cases, even if not called from
5583 generated code */
5584 saved_env = env;
5585 env = cpu_single_env;
5586
5587 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5588 if (ret) {
5589 if (retaddr) {
5590 /* now we have a real cpu fault */
5591 pc = (unsigned long)retaddr;
5592 tb = tb_find_pc(pc);
5593 if (tb) {
5594 /* the PC is inside the translated code. It means that we have
5595 a virtual CPU fault */
5596 cpu_restore_state(tb, env, pc, NULL);
5597 }
5598 }
5599 raise_exception_err(env->exception_index, env->error_code);
5600 }
5601 env = saved_env;
5602}
5603
5604#ifdef VBOX
5605
5606/**
5607 * Correctly computes the eflags.
5608 * @returns eflags.
5609 * @param env1 CPU environment.
5610 */
5611uint32_t raw_compute_eflags(CPUX86State *env1)
5612{
5613 CPUX86State *savedenv = env;
5614 uint32_t efl;
5615 env = env1;
5616 efl = compute_eflags();
5617 env = savedenv;
5618 return efl;
5619}
5620
5621/**
5622 * Reads byte from virtual address in guest memory area.
5623 * XXX: is it working for any addresses? swapped out pages?
5624 * @returns read data byte.
5625 * @param env1 CPU environment.
5626 * @param pvAddr GC Virtual address.
5627 */
5628uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5629{
5630 CPUX86State *savedenv = env;
5631 uint8_t u8;
5632 env = env1;
5633 u8 = ldub_kernel(addr);
5634 env = savedenv;
5635 return u8;
5636}
5637
5638/**
5639 * Reads word from virtual address in guest memory area.
5640 * XXX: is it working for any addresses? swapped out pages?
5641 * @returns read data word.
5642 * @param env1 CPU environment.
5643 * @param pvAddr GC Virtual address.
5644 */
5645uint16_t read_word(CPUX86State *env1, target_ulong addr)
5646{
5647 CPUX86State *savedenv = env;
5648 uint16_t u16;
5649 env = env1;
5650 u16 = lduw_kernel(addr);
5651 env = savedenv;
5652 return u16;
5653}
5654
5655/**
5656 * Reads dword from virtual address in guest memory area.
5657 * XXX: is it working for any addresses? swapped out pages?
5658 * @returns read data dword.
5659 * @param env1 CPU environment.
5660 * @param pvAddr GC Virtual address.
5661 */
5662uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5663{
5664 CPUX86State *savedenv = env;
5665 uint32_t u32;
5666 env = env1;
5667 u32 = ldl_kernel(addr);
5668 env = savedenv;
5669 return u32;
5670}
5671
5672/**
5673 * Writes byte to virtual address in guest memory area.
5674 * XXX: is it working for any addresses? swapped out pages?
5676 * @param env1 CPU environment.
5677 * @param pvAddr GC Virtual address.
5678 * @param val byte value
5679 */
5680void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5681{
5682 CPUX86State *savedenv = env;
5683 env = env1;
5684 stb(addr, val);
5685 env = savedenv;
5686}
5687
5688void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5689{
5690 CPUX86State *savedenv = env;
5691 env = env1;
5692 stw(addr, val);
5693 env = savedenv;
5694}
5695
5696void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5697{
5698 CPUX86State *savedenv = env;
5699 env = env1;
5700 stl(addr, val);
5701 env = savedenv;
5702}
5703
5704/**
5705 * Correctly loads a selector into a segment register, updating the internal
5706 * qemu data/caches.
5707 * @param env1 CPU environment.
5708 * @param seg_reg Segment register.
5709 * @param selector Selector to load.
5710 */
5711void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5712{
5713 CPUX86State *savedenv = env;
5714#ifdef FORCE_SEGMENT_SYNC
5715 jmp_buf old_buf;
5716#endif
5717
5718 env = env1;
5719
5720 if ( env->eflags & X86_EFL_VM
5721 || !(env->cr[0] & X86_CR0_PE))
5722 {
5723 load_seg_vm(seg_reg, selector);
5724
5725 env = savedenv;
5726
5727 /* Successful sync. */
5728 env1->segs[seg_reg].newselector = 0;
5729 }
5730 else
5731 {
5732 /* For some reason it works even w/o save/restore of the jump buffer; as this code is
5733 time critical - let's not do that */
5734#ifdef FORCE_SEGMENT_SYNC
5735 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5736#endif
5737 if (setjmp(env1->jmp_env) == 0)
5738 {
5739 if (seg_reg == R_CS)
5740 {
5741 uint32_t e1, e2;
5742 e1 = e2 = 0;
5743 load_segment(&e1, &e2, selector);
5744 cpu_x86_load_seg_cache(env, R_CS, selector,
5745 get_seg_base(e1, e2),
5746 get_seg_limit(e1, e2),
5747 e2);
5748 }
5749 else
5750 helper_load_seg(seg_reg, selector);
5751 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5752 loading 0 selectors, which in turn led to subtle problems like #3588 */
5753
5754 env = savedenv;
5755
5756 /* Successful sync. */
5757 env1->segs[seg_reg].newselector = 0;
5758 }
5759 else
5760 {
5761 env = savedenv;
5762
5763 /* Postpone sync until the guest uses the selector. */
5764 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5765 env1->segs[seg_reg].newselector = selector;
5766 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5767 env1->exception_index = -1;
5768 env1->error_code = 0;
5769 env1->old_exception = -1;
5770 }
5771#ifdef FORCE_SEGMENT_SYNC
5772 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5773#endif
5774 }
5775
5776}
5777
5778DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5779{
5780 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5781}
5782
5783
5784int emulate_single_instr(CPUX86State *env1)
5785{
5786 TranslationBlock *tb;
5787 TranslationBlock *current;
5788 int flags;
5789 uint8_t *tc_ptr;
5790 target_ulong old_eip;
5791
5792 /* ensures env is loaded! */
5793 CPUX86State *savedenv = env;
5794 env = env1;
5795
5796 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5797
5798 current = env->current_tb;
5799 env->current_tb = NULL;
5800 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5801
5802 /*
5803 * Translate only one instruction.
5804 */
5805 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5806 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5807 env->segs[R_CS].base, flags, 0);
5808
5809 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5810
5811
5812 /* tb_link_phys: */
5813 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5814 tb->jmp_next[0] = NULL;
5815 tb->jmp_next[1] = NULL;
5816 Assert(tb->jmp_next[0] == NULL);
5817 Assert(tb->jmp_next[1] == NULL);
5818 if (tb->tb_next_offset[0] != 0xffff)
5819 tb_reset_jump(tb, 0);
5820 if (tb->tb_next_offset[1] != 0xffff)
5821 tb_reset_jump(tb, 1);
5822
5823 /*
5824 * Execute it using emulation
5825 */
5826 old_eip = env->eip;
5827 env->current_tb = tb;
5828
5829 /*
5830 * eip remains the same for repeated instructions; it is unclear why qemu doesn't do a jump inside the generated code.
5831 * Perhaps not a very safe hack.
5832 */
5833 while(old_eip == env->eip)
5834 {
5835 tc_ptr = tb->tc_ptr;
5836
5837#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5838 int fake_ret;
5839 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5840#else
5841 tcg_qemu_tb_exec(tc_ptr);
5842#endif
5843 /*
5844 * Exit once we detect an external interrupt and interrupts are enabled
5845 */
5846 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5847 ( (env->eflags & IF_MASK) &&
5848 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5849 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5850 {
5851 break;
5852 }
5853 }
5854 env->current_tb = current;
5855
5856 tb_phys_invalidate(tb, -1);
5857 tb_free(tb);
5858/*
5859 Assert(tb->tb_next_offset[0] == 0xffff);
5860 Assert(tb->tb_next_offset[1] == 0xffff);
5861 Assert(tb->tb_next[0] == 0xffff);
5862 Assert(tb->tb_next[1] == 0xffff);
5863 Assert(tb->jmp_next[0] == NULL);
5864 Assert(tb->jmp_next[1] == NULL);
5865 Assert(tb->jmp_first == NULL); */
5866
5867 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5868
5869 /*
5870 * Execute the next instruction when we encounter instruction fusing.
5871 */
5872 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5873 {
5874 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5875 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5876 emulate_single_instr(env);
5877 }
5878
5879 env = savedenv;
5880 return 0;
5881}
5882
5883/**
5884 * Correctly loads a new ldtr selector.
5885 *
5886 * @param env1 CPU environment.
5887 * @param selector Selector to load.
5888 */
5889void sync_ldtr(CPUX86State *env1, int selector)
5890{
5891 CPUX86State *saved_env = env;
5892 if (setjmp(env1->jmp_env) == 0)
5893 {
5894 env = env1;
5895 helper_lldt(selector);
5896 env = saved_env;
5897 }
5898 else
5899 {
5900 env = saved_env;
5901#ifdef VBOX_STRICT
5902 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5903#endif
5904 }
5905}
5906
5907int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5908 uint32_t *esp_ptr, int dpl)
5909{
5910 int type, index, shift;
5911
5912 CPUX86State *savedenv = env;
5913 env = env1;
5914
5915 if (!(env->tr.flags & DESC_P_MASK))
5916 cpu_abort(env, "invalid tss");
5917 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5918 if ((type & 7) != 1)
5919 cpu_abort(env, "invalid tss type %d", type);
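 /* shift is 0 for a 16-bit TSS and 1 for a 32-bit one; the ESP/SS pair for privilege level dpl starts at byte offset (dpl * 4 + 2) << shift. */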
5920 shift = type >> 3;
5921 index = (dpl * 4 + 2) << shift;
5922 if (index + (4 << shift) - 1 > env->tr.limit)
5923 {
5924 env = savedenv;
5925 return 0;
5926 }
5927 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5928
5929 if (shift == 0) {
5930 *esp_ptr = lduw_kernel(env->tr.base + index);
5931 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5932 } else {
5933 *esp_ptr = ldl_kernel(env->tr.base + index);
5934 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5935 }
5936
5937 env = savedenv;
5938 return 1;
5939}
5940
5941//*****************************************************************************
5942// Needs to be at the bottom of the file (overriding macros)
5943
5944#ifndef VBOX
5945static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5946#else /* VBOX */
5947DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5948#endif /* VBOX */
5949{
5950 return *(CPU86_LDouble *)ptr;
5951}
5952
5953#ifndef VBOX
5954static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5955#else /* VBOX */
5956DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5957#endif /* VBOX */
5958{
5959 *(CPU86_LDouble *)ptr = f;
5960}
5961
5962#undef stw
5963#undef stl
5964#undef stq
5965#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5966#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5967#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
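/* From here on stw/stl/stq (and lduw/ldl/ldq further down) access host memory directly, since restore_raw_fp_state/save_raw_fp_state operate on a host pointer rather than a guest address. */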
5968
5969//*****************************************************************************
5970void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5971{
5972 int fpus, fptag, i, nb_xmm_regs;
5973 CPU86_LDouble tmp;
5974 uint8_t *addr;
5975 int data64 = !!(env->hflags & HF_LMA_MASK);
5976
5977 if (env->cpuid_features & CPUID_FXSR)
5978 {
5979 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5980 fptag = 0;
5981 for(i = 0; i < 8; i++) {
5982 fptag |= (env->fptags[i] << i);
5983 }
5984 stw(ptr, env->fpuc);
5985 stw(ptr + 2, fpus);
5986 stw(ptr + 4, fptag ^ 0xff);
5987
5988 addr = ptr + 0x20;
5989 for(i = 0;i < 8; i++) {
5990 tmp = ST(i);
5991 helper_fstt_raw(tmp, addr);
5992 addr += 16;
5993 }
5994
5995 if (env->cr[4] & CR4_OSFXSR_MASK) {
5996 /* XXX: finish it */
5997 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5998 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5999 nb_xmm_regs = 8 << data64;
6000 addr = ptr + 0xa0;
6001 for(i = 0; i < nb_xmm_regs; i++) {
6002#if __GNUC__ < 4
6003 stq(addr, env->xmm_regs[i].XMM_Q(0));
6004 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6005#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6006 stl(addr, env->xmm_regs[i].XMM_L(0));
6007 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6008 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6009 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6010#endif
6011 addr += 16;
6012 }
6013 }
6014 }
6015 else
6016 {
6017 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6018 int fptag;
6019
6020 fp->FCW = env->fpuc;
6021 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6022 fptag = 0;
6023 for (i=7; i>=0; i--) {
6024 fptag <<= 2;
6025 if (env->fptags[i]) {
6026 fptag |= 3;
6027 } else {
6028 /* the FPU automatically computes it */
6029 }
6030 }
6031 fp->FTW = fptag;
6032
6033 for(i = 0;i < 8; i++) {
6034 tmp = ST(i);
6035 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6036 }
6037 }
6038}
6039
6040//*****************************************************************************
6041#undef lduw
6042#undef ldl
6043#undef ldq
6044#define lduw(a) *(uint16_t *)(a)
6045#define ldl(a) *(uint32_t *)(a)
6046#define ldq(a) *(uint64_t *)(a)
6047//*****************************************************************************
6048void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6049{
6050 int i, fpus, fptag, nb_xmm_regs;
6051 CPU86_LDouble tmp;
6052 uint8_t *addr;
6053 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6054
6055 if (env->cpuid_features & CPUID_FXSR)
6056 {
6057 env->fpuc = lduw(ptr);
6058 fpus = lduw(ptr + 2);
6059 fptag = lduw(ptr + 4);
6060 env->fpstt = (fpus >> 11) & 7;
6061 env->fpus = fpus & ~0x3800;
6062 fptag ^= 0xff;
6063 for(i = 0;i < 8; i++) {
6064 env->fptags[i] = ((fptag >> i) & 1);
6065 }
6066
6067 addr = ptr + 0x20;
6068 for(i = 0;i < 8; i++) {
6069 tmp = helper_fldt_raw(addr);
6070 ST(i) = tmp;
6071 addr += 16;
6072 }
6073
6074 if (env->cr[4] & CR4_OSFXSR_MASK) {
6075 /* XXX: finish it, endianness */
6076 env->mxcsr = ldl(ptr + 0x18);
6077 //ldl(ptr + 0x1c);
6078 nb_xmm_regs = 8 << data64;
6079 addr = ptr + 0xa0;
6080 for(i = 0; i < nb_xmm_regs; i++) {
6081#if HC_ARCH_BITS == 32
6082 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6083 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6084 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6085 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6086 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6087#else
6088 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6089 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6090#endif
6091 addr += 16;
6092 }
6093 }
6094 }
6095 else
6096 {
6097 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6098 int fptag, j;
6099
6100 env->fpuc = fp->FCW;
6101 env->fpstt = (fp->FSW >> 11) & 7;
6102 env->fpus = fp->FSW & ~0x3800;
6103 fptag = fp->FTW;
6104 for(i = 0;i < 8; i++) {
6105 env->fptags[i] = ((fptag & 3) == 3);
6106 fptag >>= 2;
6107 }
6108 j = env->fpstt;
6109 for(i = 0;i < 8; i++) {
6110 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6111 ST(i) = tmp;
6112 }
6113 }
6114}
6115//*****************************************************************************
6116//*****************************************************************************
6117
6118#endif /* VBOX */
6119
6120/* Secure Virtual Machine helpers */
6121
6122#if defined(CONFIG_USER_ONLY)
6123
6124void helper_vmrun(int aflag, int next_eip_addend)
6125{
6126}
6127void helper_vmmcall(void)
6128{
6129}
6130void helper_vmload(int aflag)
6131{
6132}
6133void helper_vmsave(int aflag)
6134{
6135}
6136void helper_stgi(void)
6137{
6138}
6139void helper_clgi(void)
6140{
6141}
6142void helper_skinit(void)
6143{
6144}
6145void helper_invlpga(int aflag)
6146{
6147}
6148void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6149{
6150}
6151void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6152{
6153}
6154
6155void helper_svm_check_io(uint32_t port, uint32_t param,
6156 uint32_t next_eip_addend)
6157{
6158}
6159#else
6160
6161#ifndef VBOX
6162static inline void svm_save_seg(target_phys_addr_t addr,
6163#else /* VBOX */
6164DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6165#endif /* VBOX */
6166 const SegmentCache *sc)
6167{
6168 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6169 sc->selector);
6170 stq_phys(addr + offsetof(struct vmcb_seg, base),
6171 sc->base);
6172 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6173 sc->limit);
6174 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6175 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6176}
6177
6178#ifndef VBOX
6179static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6180#else /* VBOX */
6181DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6182#endif /* VBOX */
6183{
6184 unsigned int flags;
6185
6186 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6187 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6188 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6189 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6190 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6191}
6192
6193#ifndef VBOX
6194static inline void svm_load_seg_cache(target_phys_addr_t addr,
6195#else /* VBOX */
6196DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6197#endif /* VBOX */
6198 CPUState *env, int seg_reg)
6199{
6200 SegmentCache sc1, *sc = &sc1;
6201 svm_load_seg(addr, sc);
6202 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6203 sc->base, sc->limit, sc->flags);
6204}
6205
6206void helper_vmrun(int aflag, int next_eip_addend)
6207{
6208 target_ulong addr;
6209 uint32_t event_inj;
6210 uint32_t int_ctl;
6211
6212 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6213
6214 if (aflag == 2)
6215 addr = EAX;
6216 else
6217 addr = (uint32_t)EAX;
6218
6219 if (loglevel & CPU_LOG_TB_IN_ASM)
6220 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6221
6222 env->vm_vmcb = addr;
6223
6224 /* save the current CPU state in the hsave page */
6225 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6226 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6227
6228 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6229 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6230
6231 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6232 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6233 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6234 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6235 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6236 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6237
6238 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6239 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6240
6241 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6242 &env->segs[R_ES]);
6243 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6244 &env->segs[R_CS]);
6245 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6246 &env->segs[R_SS]);
6247 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6248 &env->segs[R_DS]);
6249
6250 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6251 EIP + next_eip_addend);
6252 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6253 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6254
6255 /* load the interception bitmaps so we do not need to access the
6256 vmcb in svm mode */
6257 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6258 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6259 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6260 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6261 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6262 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6263
6264 /* enable intercepts */
6265 env->hflags |= HF_SVMI_MASK;
6266
6267 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6268
6269 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6270 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6271
6272 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6273 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6274
6275 /* clear exit_info_2 so we behave like the real hardware */
6276 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6277
6278 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6279 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6280 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6281 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6282 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6283 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6284 if (int_ctl & V_INTR_MASKING_MASK) {
6285 env->v_tpr = int_ctl & V_TPR_MASK;
6286 env->hflags2 |= HF2_VINTR_MASK;
6287 if (env->eflags & IF_MASK)
6288 env->hflags2 |= HF2_HIF_MASK;
6289 }
6290
6291 cpu_load_efer(env,
6292 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6293 env->eflags = 0;
6294 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6295 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6296 CC_OP = CC_OP_EFLAGS;
6297
6298 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6299 env, R_ES);
6300 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6301 env, R_CS);
6302 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6303 env, R_SS);
6304 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6305 env, R_DS);
6306
6307 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6308 env->eip = EIP;
6309 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6310 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6311 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6312 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6313 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6314
6315 /* FIXME: guest state consistency checks */
6316
6317 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6318 case TLB_CONTROL_DO_NOTHING:
6319 break;
6320 case TLB_CONTROL_FLUSH_ALL_ASID:
6321 /* FIXME: this is not 100% correct but should work for now */
6322 tlb_flush(env, 1);
6323 break;
6324 }
6325
6326 env->hflags2 |= HF2_GIF_MASK;
6327
6328 if (int_ctl & V_IRQ_MASK) {
6329 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6330 }
6331
6332 /* maybe we need to inject an event */
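    /* EVENTINJ layout (AMD APM vol. 2): bits 7:0 vector, bits 10:8 type
       (0 = external interrupt, 2 = NMI, 3 = exception, 4 = software int),
       bit 11 "error code valid", bit 31 "valid"; the error code itself is
       read from the separate EVENTINJ_ERR field below. */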
6333 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6334 if (event_inj & SVM_EVTINJ_VALID) {
6335 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6336 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6337 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6338 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6339
6340 if (loglevel & CPU_LOG_TB_IN_ASM)
6341 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6342 /* FIXME: need to implement valid_err */
6343 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6344 case SVM_EVTINJ_TYPE_INTR:
6345 env->exception_index = vector;
6346 env->error_code = event_inj_err;
6347 env->exception_is_int = 0;
6348 env->exception_next_eip = -1;
6349 if (loglevel & CPU_LOG_TB_IN_ASM)
6350 fprintf(logfile, "INTR");
6351            /* XXX: is this always correct? */
6352 do_interrupt(vector, 0, 0, 0, 1);
6353 break;
6354 case SVM_EVTINJ_TYPE_NMI:
6355 env->exception_index = EXCP02_NMI;
6356 env->error_code = event_inj_err;
6357 env->exception_is_int = 0;
6358 env->exception_next_eip = EIP;
6359 if (loglevel & CPU_LOG_TB_IN_ASM)
6360 fprintf(logfile, "NMI");
6361 cpu_loop_exit();
6362 break;
6363 case SVM_EVTINJ_TYPE_EXEPT:
6364 env->exception_index = vector;
6365 env->error_code = event_inj_err;
6366 env->exception_is_int = 0;
6367 env->exception_next_eip = -1;
6368 if (loglevel & CPU_LOG_TB_IN_ASM)
6369 fprintf(logfile, "EXEPT");
6370 cpu_loop_exit();
6371 break;
6372 case SVM_EVTINJ_TYPE_SOFT:
6373 env->exception_index = vector;
6374 env->error_code = event_inj_err;
6375 env->exception_is_int = 1;
6376 env->exception_next_eip = EIP;
6377 if (loglevel & CPU_LOG_TB_IN_ASM)
6378 fprintf(logfile, "SOFT");
6379 cpu_loop_exit();
6380 break;
6381 }
6382 if (loglevel & CPU_LOG_TB_IN_ASM)
6383 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6384 }
6385}
6386
6387void helper_vmmcall(void)
6388{
6389 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6390 raise_exception(EXCP06_ILLOP);
6391}
6392
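/* VMLOAD/VMSAVE transfer the processor state that VMRUN/#VMEXIT do not handle
   themselves: FS, GS, TR and LDTR (including their hidden descriptor state),
   KernelGSBase, the STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER
   MSRs, between the CPU and the VMCB addressed by rAX. */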
6393void helper_vmload(int aflag)
6394{
6395 target_ulong addr;
6396 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6397
6398 if (aflag == 2)
6399 addr = EAX;
6400 else
6401 addr = (uint32_t)EAX;
6402
6403 if (loglevel & CPU_LOG_TB_IN_ASM)
6404 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6405 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6406 env->segs[R_FS].base);
6407
6408 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6409 env, R_FS);
6410 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6411 env, R_GS);
6412 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6413 &env->tr);
6414 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6415 &env->ldt);
6416
6417#ifdef TARGET_X86_64
6418 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6419 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6420 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6421 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6422#endif
6423 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6424 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6425 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6426 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6427}
6428
6429void helper_vmsave(int aflag)
6430{
6431 target_ulong addr;
6432 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6433
6434 if (aflag == 2)
6435 addr = EAX;
6436 else
6437 addr = (uint32_t)EAX;
6438
6439 if (loglevel & CPU_LOG_TB_IN_ASM)
6440 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6441 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6442 env->segs[R_FS].base);
6443
6444 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6445 &env->segs[R_FS]);
6446 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6447 &env->segs[R_GS]);
6448 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6449 &env->tr);
6450 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6451 &env->ldt);
6452
6453#ifdef TARGET_X86_64
6454 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6455 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6456 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6457 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6458#endif
6459 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6460 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6461 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6462 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6463}
6464
6465void helper_stgi(void)
6466{
6467 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6468 env->hflags2 |= HF2_GIF_MASK;
6469}
6470
6471void helper_clgi(void)
6472{
6473 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6474 env->hflags2 &= ~HF2_GIF_MASK;
6475}
6476
6477void helper_skinit(void)
6478{
6479 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6480 /* XXX: not implemented */
6481 raise_exception(EXCP06_ILLOP);
6482}
6483
6484void helper_invlpga(int aflag)
6485{
6486 target_ulong addr;
6487 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6488
6489 if (aflag == 2)
6490 addr = EAX;
6491 else
6492 addr = (uint32_t)EAX;
6493
6494    /* XXX: could use the ASID to see whether the flush is needed
6495       at all */
6496 tlb_flush_page(env, addr);
6497}
6498
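/* Checks a single potential intercept against the bitmaps cached by
   helper_vmrun and raises a #VMEXIT via helper_vmexit() when it is active.
   For VBox builds everything past the early return is compiled out (see the
   AssertMsgFailed at the bottom): SVM guests are expected to be handled by
   HWACCM instead. */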
6499void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6500{
6501 if (likely(!(env->hflags & HF_SVMI_MASK)))
6502 return;
6503#ifndef VBOX
6504 switch(type) {
6505#ifndef VBOX
6506 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6507#else
6508 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6509 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6510 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6511#endif
6512 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6513 helper_vmexit(type, param);
6514 }
6515 break;
6516#ifndef VBOX
6517 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6518#else
6519 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6520 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6521 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6522#endif
6523 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6524 helper_vmexit(type, param);
6525 }
6526 break;
6527 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6528 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6529 helper_vmexit(type, param);
6530 }
6531 break;
6532 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6533 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6534 helper_vmexit(type, param);
6535 }
6536 break;
6537 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6538 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6539 helper_vmexit(type, param);
6540 }
6541 break;
6542 case SVM_EXIT_MSR:
6543 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6544 /* FIXME: this should be read in at vmrun (faster this way?) */
6545 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6546 uint32_t t0, t1;
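            /* The MSR permission bitmap uses two bits per MSR (even bit =
               read intercept, odd bit = write intercept) in three 2K regions
               covering 0..0x1FFF, 0xC0000000..0xC0001FFF and
               0xC0010000..0xC0011FFF.  t1 becomes the byte offset into the
               bitmap, t0 the bit offset within that byte; param is 0 for a
               read and 1 for a write. */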
6547 switch((uint32_t)ECX) {
6548 case 0 ... 0x1fff:
6549 t0 = (ECX * 2) % 8;
6550                t1 = (ECX * 2) / 8;
6551 break;
6552 case 0xc0000000 ... 0xc0001fff:
6553 t0 = (8192 + ECX - 0xc0000000) * 2;
6554 t1 = (t0 / 8);
6555 t0 %= 8;
6556 break;
6557 case 0xc0010000 ... 0xc0011fff:
6558 t0 = (16384 + ECX - 0xc0010000) * 2;
6559 t1 = (t0 / 8);
6560 t0 %= 8;
6561 break;
6562 default:
6563 helper_vmexit(type, param);
6564 t0 = 0;
6565 t1 = 0;
6566 break;
6567 }
6568 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6569 helper_vmexit(type, param);
6570 }
6571 break;
6572 default:
6573 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6574 helper_vmexit(type, param);
6575 }
6576 break;
6577 }
6578#else
6579 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6580#endif
6581}
6582
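/* I/O permission bitmap check: one intercept bit per port.  param carries the
   IOIO exit-information word, whose SZ8/SZ16/SZ32 bits (bits 4..6) encode the
   access size in bytes, so the mask below covers every port touched by a
   multi-byte access.  exit_info_2 is loaded with the address of the
   instruction following IN/OUT before the #VMEXIT is raised. */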
6583void helper_svm_check_io(uint32_t port, uint32_t param,
6584 uint32_t next_eip_addend)
6585{
6586 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6587 /* FIXME: this should be read in at vmrun (faster this way?) */
6588 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6589 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6590 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6591 /* next EIP */
6592 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6593 env->eip + next_eip_addend);
6594 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6595 }
6596 }
6597}
6598
6599/* Note: currently only 32 bits of exit_code are used */
6600void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6601{
6602 uint32_t int_ctl;
6603
6604 if (loglevel & CPU_LOG_TB_IN_ASM)
6605 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6606 exit_code, exit_info_1,
6607 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6608 EIP);
6609
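    /* Propagate a pending interrupt shadow (e.g. directly after STI or
       MOV SS) into the VMCB interrupt state so the VMM can resume the guest
       correctly, then drop it from the emulator state. */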
6610 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6611 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6612 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6613 } else {
6614 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6615 }
6616
6617 /* Save the VM state in the vmcb */
6618 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6619 &env->segs[R_ES]);
6620 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6621 &env->segs[R_CS]);
6622 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6623 &env->segs[R_SS]);
6624 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6625 &env->segs[R_DS]);
6626
6627 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6628 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6629
6630 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6631 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6632
6633 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6634 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6635 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6636 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6637 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6638
6639 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6640 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6641 int_ctl |= env->v_tpr & V_TPR_MASK;
6642 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6643 int_ctl |= V_IRQ_MASK;
6644 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6645
6646 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6647 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6648 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6649 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6650 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6652 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6653
6654 /* Reload the host state from vm_hsave */
6655 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6656 env->hflags &= ~HF_SVMI_MASK;
6657 env->intercept = 0;
6658 env->intercept_exceptions = 0;
6659 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6660 env->tsc_offset = 0;
6661
6662 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6663 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6664
6665 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6666 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6667
6668 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6669 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6670 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6671    /* we need to set EFER after the CRs so the hidden flags get
6672       set properly */
6673 cpu_load_efer(env,
6674 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6675 env->eflags = 0;
6676 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6677 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6678 CC_OP = CC_OP_EFLAGS;
6679
6680 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6681 env, R_ES);
6682 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6683 env, R_CS);
6684 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6685 env, R_SS);
6686 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6687 env, R_DS);
6688
6689 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6690 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6691 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6692
6693 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6694 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6695
6696 /* other setups */
6697 cpu_x86_set_cpl(env, 0);
6698 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6699 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6700
6701 env->hflags2 &= ~HF2_GIF_MASK;
6702 /* FIXME: Resets the current ASID register to zero (host ASID). */
6703
6704 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6705
6706 /* Clears the TSC_OFFSET inside the processor. */
6707
6708    /* If the host is in PAE mode, the processor reloads the host's PDPEs
6709       from the page table indicated by the host's CR3. If the PDPEs contain
6710       illegal state, the processor causes a shutdown. */
6711
6712 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6713 env->cr[0] |= CR0_PE_MASK;
6714 env->eflags &= ~VM_MASK;
6715
6716 /* Disables all breakpoints in the host DR7 register. */
6717
6718 /* Checks the reloaded host state for consistency. */
6719
6720    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6721       host's code segment or non-canonical (in the case of long mode), a
6722       #GP fault is delivered inside the host. */
6723
6724 /* remove any pending exception */
6725 env->exception_index = -1;
6726 env->error_code = 0;
6727 env->old_exception = -1;
6728
6729 cpu_loop_exit();
6730}
6731
6732#endif
6733
6734/* MMX/SSE */
6735/* XXX: optimize by storing fptt and fptags in the static cpu state */
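/* Note: env->fptags[i] is 1 when FP/MMX register i is empty and 0 when it is
   valid.  MMX instructions tag every register as valid and reset the stack
   top, while EMMS tags them all as empty again. */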
6736void helper_enter_mmx(void)
6737{
6738 env->fpstt = 0;
6739 *(uint32_t *)(env->fptags) = 0;
6740 *(uint32_t *)(env->fptags + 4) = 0;
6741}
6742
6743void helper_emms(void)
6744{
6745 /* set to empty state */
6746 *(uint32_t *)(env->fptags) = 0x01010101;
6747 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6748}
6749
6750/* XXX: suppress */
6751void helper_movq(uint64_t *d, uint64_t *s)
6752{
6753 *d = *s;
6754}
6755
6756#define SHIFT 0
6757#include "ops_sse.h"
6758
6759#define SHIFT 1
6760#include "ops_sse.h"
6761
6762#define SHIFT 0
6763#include "helper_template.h"
6764#undef SHIFT
6765
6766#define SHIFT 1
6767#include "helper_template.h"
6768#undef SHIFT
6769
6770#define SHIFT 2
6771#include "helper_template.h"
6772#undef SHIFT
6773
6774#ifdef TARGET_X86_64
6775
6776#define SHIFT 3
6777#include "helper_template.h"
6778#undef SHIFT
6779
6780#endif
6781
6782/* bit operations */
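/* Both helpers assume a non-zero operand, otherwise the loops below would not
   terminate; the translator is expected to skip the helper call (BSF/BSR only
   set ZF and leave the destination undefined) when the source is zero. */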
6783target_ulong helper_bsf(target_ulong t0)
6784{
6785 int count;
6786 target_ulong res;
6787
6788 res = t0;
6789 count = 0;
6790 while ((res & 1) == 0) {
6791 count++;
6792 res >>= 1;
6793 }
6794 return count;
6795}
6796
6797target_ulong helper_bsr(target_ulong t0)
6798{
6799 int count;
6800 target_ulong res, mask;
6801
6802 res = t0;
6803 count = TARGET_LONG_BITS - 1;
6804 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6805 while ((res & mask) == 0) {
6806 count--;
6807 res <<= 1;
6808 }
6809 return count;
6810}
6811
6812
6813static int compute_all_eflags(void)
6814{
6815 return CC_SRC;
6816}
6817
6818static int compute_c_eflags(void)
6819{
6820 return CC_SRC & CC_C;
6821}
6822
6823#ifndef VBOX
6824CCTable cc_table[CC_OP_NB] = {
6825 [CC_OP_DYNAMIC] = { /* should never happen */ },
6826
6827 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6828
6829 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6830 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6831 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6832
6833 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6834 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6835 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6836
6837 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6838 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6839 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6840
6841 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6842 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6843 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6844
6845 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6846 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6847 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6848
6849 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6850 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6851 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6852
6853 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6854 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6855 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6856
6857 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6858 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6859 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6860
6861 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6862 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6863 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6864
6865 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6866 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6867 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6868
6869#ifdef TARGET_X86_64
6870 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6871
6872 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6873
6874 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6875
6876 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6877
6878 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6879
6880 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6881
6882 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6883
6884 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6885
6886 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6887
6888 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6889#endif
6890};
6891#else /* VBOX */
6892/* Sync carefully with cpu.h */
6893CCTable cc_table[CC_OP_NB] = {
6894 /* CC_OP_DYNAMIC */ { 0, 0 },
6895
6896 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6897
6898 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6899 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6900 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6901#ifdef TARGET_X86_64
6902 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6903#else
6904 /* CC_OP_MULQ */ { 0, 0 },
6905#endif
6906
6907 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6908 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6909 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6910#ifdef TARGET_X86_64
6911 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6912#else
6913 /* CC_OP_ADDQ */ { 0, 0 },
6914#endif
6915
6916 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6917 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6918 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6919#ifdef TARGET_X86_64
6920 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6921#else
6922 /* CC_OP_ADCQ */ { 0, 0 },
6923#endif
6924
6925 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6926 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6927 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6928#ifdef TARGET_X86_64
6929 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6930#else
6931 /* CC_OP_SUBQ */ { 0, 0 },
6932#endif
6933
6934 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6935 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6936 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6937#ifdef TARGET_X86_64
6938 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6939#else
6940 /* CC_OP_SBBQ */ { 0, 0 },
6941#endif
6942
6943 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6944 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6945 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6946#ifdef TARGET_X86_64
6947 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6948#else
6949 /* CC_OP_LOGICQ */ { 0, 0 },
6950#endif
6951
6952 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6953 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6954 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6955#ifdef TARGET_X86_64
6956 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6957#else
6958 /* CC_OP_INCQ */ { 0, 0 },
6959#endif
6960
6961 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6962 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6963 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6964#ifdef TARGET_X86_64
6965 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6966#else
6967 /* CC_OP_DECQ */ { 0, 0 },
6968#endif
6969
6970 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6971 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6972 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6973#ifdef TARGET_X86_64
6974 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6975#else
6976 /* CC_OP_SHLQ */ { 0, 0 },
6977#endif
6978
6979 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
6980 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
6981 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
6982#ifdef TARGET_X86_64
6983 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
6984#else
6985 /* CC_OP_SARQ */ { 0, 0 },
6986#endif
6987};
6988#endif /* VBOX */