VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/op_helper.c @ 16894

Last change on this file since 16894 was 16505, committed by vboxsync, 16 years ago

REM: safer fix for 3588

File size: 202.2 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#define CPU_NO_GLOBAL_REGS
30#include "exec.h"
31#include "host-utils.h"
32
33#ifdef VBOX
34# ifdef VBOX_WITH_VMI
35# include <VBox/parav.h>
36# endif
37#include "qemu-common.h"
38#include <math.h>
39#include "tcg.h"
40#endif
41//#define DEBUG_PCALL
42
43#if 0
44#define raise_exception_err(a, b)\
45do {\
46 if (logfile)\
47 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
48 (raise_exception_err)(a, b);\
49} while (0)
50#endif
51
52const uint8_t parity_table[256] = {
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85};
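/* An entry is CC_P when its index (the low byte of a result) has an even
 * number of set bits, which is how the x86 parity flag is defined. For
 * example, parity_table[0x03] is CC_P (two bits set) while parity_table[0x07]
 * is 0 (three bits set). */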
86
87/* modulo 17 table */
88const uint8_t rclw_table[32] = {
89 0, 1, 2, 3, 4, 5, 6, 7,
90 8, 9,10,11,12,13,14,15,
91 16, 0, 1, 2, 3, 4, 5, 6,
92 7, 8, 9,10,11,12,13,14,
93};
94
95/* modulo 9 table */
96const uint8_t rclb_table[32] = {
97 0, 1, 2, 3, 4, 5, 6, 7,
98 8, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 0, 1, 2, 3, 4, 5,
100 6, 7, 8, 0, 1, 2, 3, 4,
101};
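/* RCL/RCR rotate through CF, so a 16-bit operand effectively has 17 bit
 * positions and an 8-bit operand has 9. The rotate count is first masked to
 * 5 bits (hence the 32 entries) and then reduced by these tables, e.g.
 * rclw_table[17] == 0 and rclb_table[9] == 0: a full rotation leaves the
 * value unchanged. */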
102
103const CPU86_LDouble f15rk[7] =
104{
105 0.00000000000000000000L,
106 1.00000000000000000000L,
107 3.14159265358979323851L, /*pi*/
108 0.30102999566398119523L, /*lg2*/
109 0.69314718055994530943L, /*ln2*/
110 1.44269504088896340739L, /*l2e*/
111 3.32192809488736234781L, /*l2t*/
112};
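/* x87 constants: zero, one, pi, log10(2), ln(2), log2(e) and log2(10), i.e.
 * the values pushed by the FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and
 * FLDL2T instructions. */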
113
114/* broken thread support */
115
116spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
117
118void helper_lock(void)
119{
120 spin_lock(&global_cpu_lock);
121}
122
123void helper_unlock(void)
124{
125 spin_unlock(&global_cpu_lock);
126}
127
128void helper_write_eflags(target_ulong t0, uint32_t update_mask)
129{
130 load_eflags(t0, update_mask);
131}
132
133target_ulong helper_read_eflags(void)
134{
135 uint32_t eflags;
136 eflags = cc_table[CC_OP].compute_all();
137 eflags |= (DF & DF_MASK);
138 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
139 return eflags;
140}
141
142#ifdef VBOX
143void helper_write_eflags_vme(target_ulong t0)
144{
145 unsigned int new_eflags = t0;
146
147 assert(env->eflags & (1<<VM_SHIFT));
148
149 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
150 /* if TF will be set -> #GP */
151 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
152 || (new_eflags & TF_MASK)) {
153 raise_exception(EXCP0D_GPF);
154 } else {
155 load_eflags(new_eflags,
156 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
157
158 if (new_eflags & IF_MASK) {
159 env->eflags |= VIF_MASK;
160 } else {
161 env->eflags &= ~VIF_MASK;
162 }
163 }
164}
165
166target_ulong helper_read_eflags_vme(void)
167{
168 uint32_t eflags;
169 eflags = cc_table[CC_OP].compute_all();
170 eflags |= (DF & DF_MASK);
171 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
172 if (env->eflags & VIF_MASK)
173 eflags |= IF_MASK;
174 else
175 eflags &= ~IF_MASK;
176
177 /* According to AMD manual, should be read with IOPL == 3 */
178 eflags |= (3 << IOPL_SHIFT);
179
180 /* We only use helper_read_eflags_vme() in 16-bit mode */
181 return eflags & 0xffff;
182}
183
184void helper_dump_state()
185{
186 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
187 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
188 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
189 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
190 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
191 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
192 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
193}
194#endif
195
196/* return non-zero on error */
197#ifndef VBOX
198static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
199#else /* VBOX */
200DECLINLINE(int) load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
201#endif /* VBOX */
202 int selector)
203{
204 SegmentCache *dt;
205 int index;
206 target_ulong ptr;
207
208#ifdef VBOX
209 /* Trying to load a selector with CPL=1? */
210 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
211 {
212 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
213 selector = selector & 0xfffc;
214 }
215#endif
216
217 if (selector & 0x4)
218 dt = &env->ldt;
219 else
220 dt = &env->gdt;
221 index = selector & ~7;
222 if ((index + 7) > dt->limit)
223 return -1;
224 ptr = dt->base + index;
225 *e1_ptr = ldl_kernel(ptr);
226 *e2_ptr = ldl_kernel(ptr + 4);
227 return 0;
228}
229
230#ifndef VBOX
231static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
232#else /* VBOX */
233DECLINLINE(unsigned int) get_seg_limit(uint32_t e1, uint32_t e2)
234#endif /* VBOX */
235{
236 unsigned int limit;
237 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
238 if (e2 & DESC_G_MASK)
239 limit = (limit << 12) | 0xfff;
240 return limit;
241}
242
243#ifndef VBOX
244static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
245#else /* VBOX */
246DECLINLINE(uint32_t) get_seg_base(uint32_t e1, uint32_t e2)
247#endif /* VBOX */
248{
249 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
250}
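/* The descriptor words are laid out as on real hardware: e1 holds limit[15:0]
 * and base[15:0], e2 holds base[23:16], the type/DPL/P attribute byte,
 * limit[19:16], the flag bits (including G) and base[31:24]; the two helpers
 * above simply reassemble those fields. */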
251
252#ifndef VBOX
253static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
254#else /* VBOX */
255DECLINLINE(void) load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
256#endif /* VBOX */
257{
258 sc->base = get_seg_base(e1, e2);
259 sc->limit = get_seg_limit(e1, e2);
260 sc->flags = e2;
261}
262
263/* init the segment cache in vm86 mode. */
264#ifndef VBOX
265static inline void load_seg_vm(int seg, int selector)
266#else /* VBOX */
267DECLINLINE(void) load_seg_vm(int seg, int selector)
268#endif /* VBOX */
269{
270 selector &= 0xffff;
271#ifdef VBOX
272 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;
273
274 if (seg == R_CS)
275 flags |= DESC_CS_MASK;
276
277 cpu_x86_load_seg_cache(env, seg, selector,
278 (selector << 4), 0xffff, flags);
279#else
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
282#endif
283}
284
285#ifndef VBOX
286static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
287#else /* VBOX */
288DECLINLINE(void) get_ss_esp_from_tss(uint32_t *ss_ptr,
289#endif /* VBOX */
290 uint32_t *esp_ptr, int dpl)
291{
292#ifndef VBOX
293 int type, index, shift;
294#else
295 unsigned int type, index, shift;
296#endif
297
298#if 0
299 {
300 int i;
301 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
302 for(i=0;i<env->tr.limit;i++) {
303 printf("%02x ", env->tr.base[i]);
304 if ((i & 7) == 7) printf("\n");
305 }
306 printf("\n");
307 }
308#endif
309
310 if (!(env->tr.flags & DESC_P_MASK))
311 cpu_abort(env, "invalid tss");
312 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if ((type & 7) != 1)
314 cpu_abort(env, "invalid tss type");
315 shift = type >> 3;
316 index = (dpl * 4 + 2) << shift;
317 if (index + (4 << shift) - 1 > env->tr.limit)
318 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
319 if (shift == 0) {
320 *esp_ptr = lduw_kernel(env->tr.base + index);
321 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
322 } else {
323 *esp_ptr = ldl_kernel(env->tr.base + index);
324 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
325 }
326}
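/* For a 32-bit TSS and dpl 0 this reads ESP0 from offset 4 and SS0 from
 * offset 8; the 16-bit variant reads SP0 and SS0 from offsets 2 and 4,
 * matching the hardware TSS layouts. */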
327
328/* XXX: merge with load_seg() */
329static void tss_load_seg(int seg_reg, int selector)
330{
331 uint32_t e1, e2;
332 int rpl, dpl, cpl;
333
334#ifdef VBOX
335 e1 = e2 = 0;
336 cpl = env->hflags & HF_CPL_MASK;
337 /* Trying to load a selector with CPL=1? */
338 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
339 {
340 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
341 selector = selector & 0xfffc;
342 }
343#endif
344
345 if ((selector & 0xfffc) != 0) {
346 if (load_segment(&e1, &e2, selector) != 0)
347 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
348 if (!(e2 & DESC_S_MASK))
349 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
350 rpl = selector & 3;
351 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
352 cpl = env->hflags & HF_CPL_MASK;
353 if (seg_reg == R_CS) {
354 if (!(e2 & DESC_CS_MASK))
355 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
356 /* XXX: is it correct ? */
357 if (dpl != rpl)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if ((e2 & DESC_C_MASK) && dpl > rpl)
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 } else if (seg_reg == R_SS) {
362 /* SS must be writable data */
363 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
364 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
365 if (dpl != cpl || dpl != rpl)
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 } else {
368 /* not readable code */
369 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
370 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
371 /* if data or non-conforming code, check the rights */
372 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
373 if (dpl < cpl || dpl < rpl)
374 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
375 }
376 }
377 if (!(e2 & DESC_P_MASK))
378 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
379 cpu_x86_load_seg_cache(env, seg_reg, selector,
380 get_seg_base(e1, e2),
381 get_seg_limit(e1, e2),
382 e2);
383 } else {
384 if (seg_reg == R_SS || seg_reg == R_CS)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386#ifdef VBOX
387#if 0
388 /** @todo: we currently ignore loading null (0) selectors; check what the correct behaviour is */
389 cpu_x86_load_seg_cache(env, seg_reg, selector,
390 0, 0, 0);
391#endif
392#endif
393 }
394}
395
396#define SWITCH_TSS_JMP 0
397#define SWITCH_TSS_IRET 1
398#define SWITCH_TSS_CALL 2
399
400/* XXX: restore CPU state in registers (PowerPC case) */
401static void switch_tss(int tss_selector,
402 uint32_t e1, uint32_t e2, int source,
403 uint32_t next_eip)
404{
405 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
406 target_ulong tss_base;
407 uint32_t new_regs[8], new_segs[6];
408 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
409 uint32_t old_eflags, eflags_mask;
410 SegmentCache *dt;
411#ifndef VBOX
412 int index;
413#else
414 unsigned int index;
415#endif
416 target_ulong ptr;
417
418 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
419#ifdef DEBUG_PCALL
420 if (loglevel & CPU_LOG_PCALL)
421 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
422#endif
423
424#if defined(VBOX) && defined(DEBUG)
425 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
426#endif
427
428 /* if task gate, we read the TSS segment and we load it */
429 if (type == 5) {
430 if (!(e2 & DESC_P_MASK))
431 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
432 tss_selector = e1 >> 16;
433 if (tss_selector & 4)
434 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
435 if (load_segment(&e1, &e2, tss_selector) != 0)
436 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
437 if (e2 & DESC_S_MASK)
438 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
439 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
440 if ((type & 7) != 1)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 }
443
444 if (!(e2 & DESC_P_MASK))
445 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
446
447 if (type & 8)
448 tss_limit_max = 103;
449 else
450 tss_limit_max = 43;
451 tss_limit = get_seg_limit(e1, e2);
452 tss_base = get_seg_base(e1, e2);
453 if ((tss_selector & 4) != 0 ||
454 tss_limit < tss_limit_max)
455 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
456 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
457 if (old_type & 8)
458 old_tss_limit_max = 103;
459 else
460 old_tss_limit_max = 43;
461
462 /* read all the registers from the new TSS */
463 if (type & 8) {
464 /* 32 bit */
465 new_cr3 = ldl_kernel(tss_base + 0x1c);
466 new_eip = ldl_kernel(tss_base + 0x20);
467 new_eflags = ldl_kernel(tss_base + 0x24);
468 for(i = 0; i < 8; i++)
469 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
470 for(i = 0; i < 6; i++)
471 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
472 new_ldt = lduw_kernel(tss_base + 0x60);
473 new_trap = ldl_kernel(tss_base + 0x64);
474 } else {
475 /* 16 bit */
476 new_cr3 = 0;
477 new_eip = lduw_kernel(tss_base + 0x0e);
478 new_eflags = lduw_kernel(tss_base + 0x10);
479 for(i = 0; i < 8; i++)
480 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
481 for(i = 0; i < 4; i++)
482 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
483 new_ldt = lduw_kernel(tss_base + 0x2a);
484 new_segs[R_FS] = 0;
485 new_segs[R_GS] = 0;
486 new_trap = 0;
487 }
488
489 /* NOTE: we must avoid memory exceptions during the task switch,
490 so we make dummy accesses before */
491 /* XXX: it can still fail in some cases, so a bigger hack is
492 necessary to validate the TLB after having done the accesses */
493
494 v1 = ldub_kernel(env->tr.base);
495 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
496 stb_kernel(env->tr.base, v1);
497 stb_kernel(env->tr.base + old_tss_limit_max, v2);
498
499 /* clear busy bit (it is restartable) */
500 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
501 target_ulong ptr;
502 uint32_t e2;
503 ptr = env->gdt.base + (env->tr.selector & ~7);
504 e2 = ldl_kernel(ptr + 4);
505 e2 &= ~DESC_TSS_BUSY_MASK;
506 stl_kernel(ptr + 4, e2);
507 }
508 old_eflags = compute_eflags();
509 if (source == SWITCH_TSS_IRET)
510 old_eflags &= ~NT_MASK;
511
512 /* save the current state in the old TSS */
513 if (type & 8) {
514 /* 32 bit */
515 stl_kernel(env->tr.base + 0x20, next_eip);
516 stl_kernel(env->tr.base + 0x24, old_eflags);
517 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
518 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
519 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
520 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
521 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
522 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
523 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
524 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
525 for(i = 0; i < 6; i++)
526 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
527#if defined(VBOX) && defined(DEBUG)
528 printf("TSS 32 bits switch\n");
529 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
530#endif
531 } else {
532 /* 16 bit */
533 stw_kernel(env->tr.base + 0x0e, next_eip);
534 stw_kernel(env->tr.base + 0x10, old_eflags);
535 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
536 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
537 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
538 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
539 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
540 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
541 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
542 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
543 for(i = 0; i < 4; i++)
544 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
545 }
546
547 /* now if an exception occurs, it will occur in the next task
548 context */
549
550 if (source == SWITCH_TSS_CALL) {
551 stw_kernel(tss_base, env->tr.selector);
552 new_eflags |= NT_MASK;
553 }
554
555 /* set busy bit */
556 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
557 target_ulong ptr;
558 uint32_t e2;
559 ptr = env->gdt.base + (tss_selector & ~7);
560 e2 = ldl_kernel(ptr + 4);
561 e2 |= DESC_TSS_BUSY_MASK;
562 stl_kernel(ptr + 4, e2);
563 }
564
565 /* set the new CPU state */
566 /* from this point, any exception which occurs can give problems */
567 env->cr[0] |= CR0_TS_MASK;
568 env->hflags |= HF_TS_MASK;
569 env->tr.selector = tss_selector;
570 env->tr.base = tss_base;
571 env->tr.limit = tss_limit;
572 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
573
574 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
575 cpu_x86_update_cr3(env, new_cr3);
576 }
577
578 /* first load the registers that cannot raise an exception, then reload
579 the ones that can raise exceptions */
580 env->eip = new_eip;
581 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
582 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
583 if (!(type & 8))
584 eflags_mask &= 0xffff;
585 load_eflags(new_eflags, eflags_mask);
586 /* XXX: what to do in 16 bit case ? */
587 EAX = new_regs[0];
588 ECX = new_regs[1];
589 EDX = new_regs[2];
590 EBX = new_regs[3];
591 ESP = new_regs[4];
592 EBP = new_regs[5];
593 ESI = new_regs[6];
594 EDI = new_regs[7];
595 if (new_eflags & VM_MASK) {
596 for(i = 0; i < 6; i++)
597 load_seg_vm(i, new_segs[i]);
598 /* in vm86, CPL is always 3 */
599 cpu_x86_set_cpl(env, 3);
600 } else {
601 /* CPL is set to the RPL of CS */
602 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
603 /* load just the selectors first, as the rest may trigger exceptions */
604 for(i = 0; i < 6; i++)
605 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
606 }
607
608 env->ldt.selector = new_ldt & ~4;
609 env->ldt.base = 0;
610 env->ldt.limit = 0;
611 env->ldt.flags = 0;
612
613 /* load the LDT */
614 if (new_ldt & 4)
615 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
616
617 if ((new_ldt & 0xfffc) != 0) {
618 dt = &env->gdt;
619 index = new_ldt & ~7;
620 if ((index + 7) > dt->limit)
621 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
622 ptr = dt->base + index;
623 e1 = ldl_kernel(ptr);
624 e2 = ldl_kernel(ptr + 4);
625 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
626 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
627 if (!(e2 & DESC_P_MASK))
628 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
629 load_seg_cache_raw_dt(&env->ldt, e1, e2);
630 }
631
632 /* load the segments */
633 if (!(new_eflags & VM_MASK)) {
634 tss_load_seg(R_CS, new_segs[R_CS]);
635 tss_load_seg(R_SS, new_segs[R_SS]);
636 tss_load_seg(R_ES, new_segs[R_ES]);
637 tss_load_seg(R_DS, new_segs[R_DS]);
638 tss_load_seg(R_FS, new_segs[R_FS]);
639 tss_load_seg(R_GS, new_segs[R_GS]);
640 }
641
642 /* check that EIP is in the CS segment limits */
643 if (new_eip > env->segs[R_CS].limit) {
644 /* XXX: different exception if CALL ? */
645 raise_exception_err(EXCP0D_GPF, 0);
646 }
647}
648
649/* check if Port I/O is allowed in TSS */
650#ifndef VBOX
651static inline void check_io(int addr, int size)
652{
653 int io_offset, val, mask;
654
655#else /* VBOX */
656DECLINLINE(void) check_io(int addr, int size)
657{
658 int val, mask;
659 unsigned int io_offset;
660#endif /* VBOX */
661 /* TSS must be a valid 32 bit one */
662 if (!(env->tr.flags & DESC_P_MASK) ||
663 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
664 env->tr.limit < 103)
665 goto fail;
666 io_offset = lduw_kernel(env->tr.base + 0x66);
667 io_offset += (addr >> 3);
668 /* Note: the check needs two bytes */
669 if ((io_offset + 1) > env->tr.limit)
670 goto fail;
671 val = lduw_kernel(env->tr.base + io_offset);
672 val >>= (addr & 7);
673 mask = (1 << size) - 1;
674 /* all bits must be zero to allow the I/O */
675 if ((val & mask) != 0) {
676 fail:
677 raise_exception_err(EXCP0D_GPF, 0);
678 }
679}
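/* Example: an "inb" from port 0x3f8 that needs a bitmap check arrives here
 * with addr=0x3f8 and size=1; io_offset is advanced by 0x7f (0x3f8 >> 3) and
 * bit 0 (0x3f8 & 7) of the word read there must be clear for the access to be
 * allowed. */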
680
681#ifdef VBOX
682/* Keep in sync with gen_check_external_event() */
683void helper_check_external_event()
684{
685 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_EXIT
686 | CPU_INTERRUPT_EXTERNAL_TIMER
687 | CPU_INTERRUPT_EXTERNAL_DMA))
688 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
689 && (env->eflags & IF_MASK)
690 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
691 {
692 helper_external_event();
693 }
694
695}
696
697void helper_sync_seg(uint32_t reg)
698{
699 if (env->segs[reg].newselector)
700 sync_seg(env, reg, env->segs[reg].newselector);
701}
702#endif
703
704void helper_check_iob(uint32_t t0)
705{
706 check_io(t0, 1);
707}
708
709void helper_check_iow(uint32_t t0)
710{
711 check_io(t0, 2);
712}
713
714void helper_check_iol(uint32_t t0)
715{
716 check_io(t0, 4);
717}
718
719void helper_outb(uint32_t port, uint32_t data)
720{
721 cpu_outb(env, port, data & 0xff);
722}
723
724target_ulong helper_inb(uint32_t port)
725{
726 return cpu_inb(env, port);
727}
728
729void helper_outw(uint32_t port, uint32_t data)
730{
731 cpu_outw(env, port, data & 0xffff);
732}
733
734target_ulong helper_inw(uint32_t port)
735{
736 return cpu_inw(env, port);
737}
738
739void helper_outl(uint32_t port, uint32_t data)
740{
741 cpu_outl(env, port, data);
742}
743
744target_ulong helper_inl(uint32_t port)
745{
746 return cpu_inl(env, port);
747}
748
749#ifndef VBOX
750static inline unsigned int get_sp_mask(unsigned int e2)
751#else /* VBOX */
752DECLINLINE(unsigned int) get_sp_mask(unsigned int e2)
753#endif /* VBOX */
754{
755 if (e2 & DESC_B_MASK)
756 return 0xffffffff;
757 else
758 return 0xffff;
759}
760
761#ifdef TARGET_X86_64
762#define SET_ESP(val, sp_mask)\
763do {\
764 if ((sp_mask) == 0xffff)\
765 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
766 else if ((sp_mask) == 0xffffffffLL)\
767 ESP = (uint32_t)(val);\
768 else\
769 ESP = (val);\
770} while (0)
771#else
772#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
773#endif
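/* e.g. with a 16-bit stack (sp_mask 0xffff), SET_ESP(0x00000004, 0xffff) on
 * ESP == 0x0001fffe yields 0x00010004: only SP is updated and the upper bits
 * of ESP are preserved. */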
774
775/* in 64-bit machines, this can overflow. So this segment addition macro
776 * can be used to trim the value to 32-bit whenever needed */
777#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
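/* e.g. ssp = 0xfffff000, sp = 0x2000, sp_mask = 0xffffffff: the raw sum is
 * 0x100001000 when target_ulong is 64 bits wide, and SEG_ADDL trims it to
 * 0x00001000, which is where a 32-bit stack segment would wrap to. */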
778
779/* XXX: add an is_user flag to have proper security support */
780#define PUSHW(ssp, sp, sp_mask, val)\
781{\
782 sp -= 2;\
783 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
784}
785
786#define PUSHL(ssp, sp, sp_mask, val)\
787{\
788 sp -= 4;\
789 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
790}
791
792#define POPW(ssp, sp, sp_mask, val)\
793{\
794 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
795 sp += 2;\
796}
797
798#define POPL(ssp, sp, sp_mask, val)\
799{\
800 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
801 sp += 4;\
802}
803
804/* protected mode interrupt */
805static void do_interrupt_protected(int intno, int is_int, int error_code,
806 unsigned int next_eip, int is_hw)
807{
808 SegmentCache *dt;
809 target_ulong ptr, ssp;
810 int type, dpl, selector, ss_dpl, cpl;
811 int has_error_code, new_stack, shift;
812 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
813 uint32_t old_eip, sp_mask;
814
815#ifdef VBOX
816 ss = ss_e1 = ss_e2 = 0;
817# ifdef VBOX_WITH_VMI
818 if ( intno == 6
819 && PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
820 {
821 env->exception_index = EXCP_PARAV_CALL;
822 cpu_loop_exit();
823 }
824# endif
825 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
826 cpu_loop_exit();
827#endif
828
829 has_error_code = 0;
830 if (!is_int && !is_hw) {
831 switch(intno) {
832 case 8:
833 case 10:
834 case 11:
835 case 12:
836 case 13:
837 case 14:
838 case 17:
839 has_error_code = 1;
840 break;
841 }
842 }
843 if (is_int)
844 old_eip = next_eip;
845 else
846 old_eip = env->eip;
847
848 dt = &env->idt;
849#ifndef VBOX
850 if (intno * 8 + 7 > dt->limit)
851#else
852 if ((unsigned)intno * 8 + 7 > dt->limit)
853#endif
854 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
855 ptr = dt->base + intno * 8;
856 e1 = ldl_kernel(ptr);
857 e2 = ldl_kernel(ptr + 4);
858 /* check gate type */
859 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
860 switch(type) {
861 case 5: /* task gate */
862 /* must do that check here to return the correct error code */
863 if (!(e2 & DESC_P_MASK))
864 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
865 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
866 if (has_error_code) {
867 int type;
868 uint32_t mask;
869 /* push the error code */
870 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
871 shift = type >> 3;
872 if (env->segs[R_SS].flags & DESC_B_MASK)
873 mask = 0xffffffff;
874 else
875 mask = 0xffff;
876 esp = (ESP - (2 << shift)) & mask;
877 ssp = env->segs[R_SS].base + esp;
878 if (shift)
879 stl_kernel(ssp, error_code);
880 else
881 stw_kernel(ssp, error_code);
882 SET_ESP(esp, mask);
883 }
884 return;
885 case 6: /* 286 interrupt gate */
886 case 7: /* 286 trap gate */
887 case 14: /* 386 interrupt gate */
888 case 15: /* 386 trap gate */
889 break;
890 default:
891 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
892 break;
893 }
894 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
895 cpl = env->hflags & HF_CPL_MASK;
896 /* check privilege if software int */
897 if (is_int && dpl < cpl)
898 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
899 /* check valid bit */
900 if (!(e2 & DESC_P_MASK))
901 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
902 selector = e1 >> 16;
903 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 if ((selector & 0xfffc) == 0)
905 raise_exception_err(EXCP0D_GPF, 0);
906
907 if (load_segment(&e1, &e2, selector) != 0)
908 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 if (dpl > cpl)
913 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
916 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
917 /* to inner privilege */
918 get_ss_esp_from_tss(&ss, &esp, dpl);
919 if ((ss & 0xfffc) == 0)
920 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
921 if ((ss & 3) != dpl)
922 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
923 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
924 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
925 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
926 if (ss_dpl != dpl)
927 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
928 if (!(ss_e2 & DESC_S_MASK) ||
929 (ss_e2 & DESC_CS_MASK) ||
930 !(ss_e2 & DESC_W_MASK))
931 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
932 if (!(ss_e2 & DESC_P_MASK))
933#ifdef VBOX /* See page 3-477 of 253666.pdf */
934 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
935#else
936 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
937#endif
938 new_stack = 1;
939 sp_mask = get_sp_mask(ss_e2);
940 ssp = get_seg_base(ss_e1, ss_e2);
941#if defined(VBOX) && defined(DEBUG)
942 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
943#endif
944 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
945 /* to same privilege */
946 if (env->eflags & VM_MASK)
947 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 new_stack = 0;
949 sp_mask = get_sp_mask(env->segs[R_SS].flags);
950 ssp = env->segs[R_SS].base;
951 esp = ESP;
952 dpl = cpl;
953 } else {
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0; /* avoid warning */
956 sp_mask = 0; /* avoid warning */
957 ssp = 0; /* avoid warning */
958 esp = 0; /* avoid warning */
959 }
960
961 shift = type >> 3;
962
963#if 0
964 /* XXX: check that enough room is available */
965 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
966 if (env->eflags & VM_MASK)
967 push_size += 8;
968 push_size <<= shift;
969#endif
970 if (shift == 1) {
971 if (new_stack) {
972 if (env->eflags & VM_MASK) {
973 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
974 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
975 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
976 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
977 }
978 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
979 PUSHL(ssp, esp, sp_mask, ESP);
980 }
981 PUSHL(ssp, esp, sp_mask, compute_eflags());
982 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
983 PUSHL(ssp, esp, sp_mask, old_eip);
984 if (has_error_code) {
985 PUSHL(ssp, esp, sp_mask, error_code);
986 }
987 } else {
988 if (new_stack) {
989 if (env->eflags & VM_MASK) {
990 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
991 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
992 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
993 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
994 }
995 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
996 PUSHW(ssp, esp, sp_mask, ESP);
997 }
998 PUSHW(ssp, esp, sp_mask, compute_eflags());
999 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1000 PUSHW(ssp, esp, sp_mask, old_eip);
1001 if (has_error_code) {
1002 PUSHW(ssp, esp, sp_mask, error_code);
1003 }
1004 }
1005
1006 if (new_stack) {
1007 if (env->eflags & VM_MASK) {
1008 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1009 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1010 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1011 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1012 }
1013 ss = (ss & ~3) | dpl;
1014 cpu_x86_load_seg_cache(env, R_SS, ss,
1015 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1016 }
1017 SET_ESP(esp, sp_mask);
1018
1019 selector = (selector & ~3) | dpl;
1020 cpu_x86_load_seg_cache(env, R_CS, selector,
1021 get_seg_base(e1, e2),
1022 get_seg_limit(e1, e2),
1023 e2);
1024 cpu_x86_set_cpl(env, dpl);
1025 env->eip = offset;
1026
1027 /* an interrupt gate clears the IF flag */
1028 if ((type & 1) == 0) {
1029 env->eflags &= ~IF_MASK;
1030 }
1031 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1032}
1033#ifdef VBOX
1034
1035/* check if VME interrupt redirection is enabled in TSS */
1036DECLINLINE(bool) is_vme_irq_redirected(int intno)
1037{
1038 unsigned int io_offset, intredir_offset;
1039 unsigned char val, mask;
1040
1041 /* TSS must be a valid 32 bit one */
1042 if (!(env->tr.flags & DESC_P_MASK) ||
1043 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1044 env->tr.limit < 103)
1045 goto fail;
1046 io_offset = lduw_kernel(env->tr.base + 0x66);
1047 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1048 if (io_offset < 0x68 + 0x20)
1049 io_offset = 0x68 + 0x20;
1050 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1051 intredir_offset = io_offset - 0x20;
1052
1053 intredir_offset += (intno >> 3);
1054 if ((intredir_offset) > env->tr.limit)
1055 goto fail;
1056
1057 val = ldub_kernel(env->tr.base + intredir_offset);
1058 mask = 1 << (unsigned char)(intno & 7);
1059
1060 /* bit set means no redirection. */
1061 if ((val & mask) != 0) {
1062 return false;
1063 }
1064 return true;
1065
1066fail:
1067 raise_exception_err(EXCP0D_GPF, 0);
1068 return true;
1069}
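/* Example: for int 0x21 the byte at intredir_offset + 4 is read and bit 1
 * (0x21 & 7) is tested: if set, the interrupt is not redirected and goes
 * through the normal protected mode path (or #GP if IOPL < 3); if clear, it
 * is delivered through the virtual-8086 IVT at linear address 0, as done by
 * do_soft_interrupt_vme() below. */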
1070
1071/* V86 mode software interrupt with CR4.VME=1 */
1072static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1073{
1074 target_ulong ptr, ssp;
1075 int selector;
1076 uint32_t offset, esp;
1077 uint32_t old_cs, old_eflags;
1078 uint32_t iopl;
1079
1080 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1081
1082 if (!is_vme_irq_redirected(intno))
1083 {
1084 if (iopl == 3)
1085 {
1086 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1087 return;
1088 }
1089 else
1090 raise_exception_err(EXCP0D_GPF, 0);
1091 }
1092
1093 /* virtual mode idt is at linear address 0 */
1094 ptr = 0 + intno * 4;
1095 offset = lduw_kernel(ptr);
1096 selector = lduw_kernel(ptr + 2);
1097 esp = ESP;
1098 ssp = env->segs[R_SS].base;
1099 old_cs = env->segs[R_CS].selector;
1100
1101 old_eflags = compute_eflags();
1102 if (iopl < 3)
1103 {
1104 /* copy VIF into IF and set IOPL to 3 */
1105 if (env->eflags & VIF_MASK)
1106 old_eflags |= IF_MASK;
1107 else
1108 old_eflags &= ~IF_MASK;
1109
1110 old_eflags |= (3 << IOPL_SHIFT);
1111 }
1112
1113 /* XXX: use SS segment size ? */
1114 PUSHW(ssp, esp, 0xffff, old_eflags);
1115 PUSHW(ssp, esp, 0xffff, old_cs);
1116 PUSHW(ssp, esp, 0xffff, next_eip);
1117
1118 /* update processor state */
1119 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1120 env->eip = offset;
1121 env->segs[R_CS].selector = selector;
1122 env->segs[R_CS].base = (selector << 4);
1123 env->eflags &= ~(TF_MASK | RF_MASK);
1124
1125 if (iopl < 3)
1126 env->eflags &= ~VIF_MASK;
1127 else
1128 env->eflags &= ~IF_MASK;
1129}
1130#endif /* VBOX */
1131
1132#ifdef TARGET_X86_64
1133
1134#define PUSHQ(sp, val)\
1135{\
1136 sp -= 8;\
1137 stq_kernel(sp, (val));\
1138}
1139
1140#define POPQ(sp, val)\
1141{\
1142 val = ldq_kernel(sp);\
1143 sp += 8;\
1144}
1145
1146#ifndef VBOX
1147static inline target_ulong get_rsp_from_tss(int level)
1148#else /* VBOX */
1149DECLINLINE(target_ulong) get_rsp_from_tss(int level)
1150#endif /* VBOX */
1151{
1152 int index;
1153
1154#if 0
1155 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1156 env->tr.base, env->tr.limit);
1157#endif
1158
1159 if (!(env->tr.flags & DESC_P_MASK))
1160 cpu_abort(env, "invalid tss");
1161 index = 8 * level + 4;
1162 if ((index + 7) > env->tr.limit)
1163 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1164 return ldq_kernel(env->tr.base + index);
1165}
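/* level 0..2 select RSP0..RSP2 (offsets 4, 12 and 20 in the 64-bit TSS);
 * do_interrupt64() passes ist + 3, so IST1..IST7 map to offsets 36..84. */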
1166
1167/* 64 bit interrupt */
1168static void do_interrupt64(int intno, int is_int, int error_code,
1169 target_ulong next_eip, int is_hw)
1170{
1171 SegmentCache *dt;
1172 target_ulong ptr;
1173 int type, dpl, selector, cpl, ist;
1174 int has_error_code, new_stack;
1175 uint32_t e1, e2, e3, ss;
1176 target_ulong old_eip, esp, offset;
1177
1178#ifdef VBOX
1179 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1180 cpu_loop_exit();
1181#endif
1182
1183 has_error_code = 0;
1184 if (!is_int && !is_hw) {
1185 switch(intno) {
1186 case 8:
1187 case 10:
1188 case 11:
1189 case 12:
1190 case 13:
1191 case 14:
1192 case 17:
1193 has_error_code = 1;
1194 break;
1195 }
1196 }
1197 if (is_int)
1198 old_eip = next_eip;
1199 else
1200 old_eip = env->eip;
1201
1202 dt = &env->idt;
1203 if (intno * 16 + 15 > dt->limit)
1204 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1205 ptr = dt->base + intno * 16;
1206 e1 = ldl_kernel(ptr);
1207 e2 = ldl_kernel(ptr + 4);
1208 e3 = ldl_kernel(ptr + 8);
1209 /* check gate type */
1210 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1211 switch(type) {
1212 case 14: /* 386 interrupt gate */
1213 case 15: /* 386 trap gate */
1214 break;
1215 default:
1216 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1217 break;
1218 }
1219 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1220 cpl = env->hflags & HF_CPL_MASK;
1221 /* check privilege if software int */
1222 if (is_int && dpl < cpl)
1223 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1224 /* check valid bit */
1225 if (!(e2 & DESC_P_MASK))
1226 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1227 selector = e1 >> 16;
1228 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1229 ist = e2 & 7;
1230 if ((selector & 0xfffc) == 0)
1231 raise_exception_err(EXCP0D_GPF, 0);
1232
1233 if (load_segment(&e1, &e2, selector) != 0)
1234 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1235 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1237 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1238 if (dpl > cpl)
1239 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1240 if (!(e2 & DESC_P_MASK))
1241 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1242 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1243 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1244 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1245 /* to inner privilege */
1246 if (ist != 0)
1247 esp = get_rsp_from_tss(ist + 3);
1248 else
1249 esp = get_rsp_from_tss(dpl);
1250 esp &= ~0xfLL; /* align stack */
1251 ss = 0;
1252 new_stack = 1;
1253 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1254 /* to same privilege */
1255 if (env->eflags & VM_MASK)
1256 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1257 new_stack = 0;
1258 if (ist != 0)
1259 esp = get_rsp_from_tss(ist + 3);
1260 else
1261 esp = ESP;
1262 esp &= ~0xfLL; /* align stack */
1263 dpl = cpl;
1264 } else {
1265 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1266 new_stack = 0; /* avoid warning */
1267 esp = 0; /* avoid warning */
1268 }
1269
1270 PUSHQ(esp, env->segs[R_SS].selector);
1271 PUSHQ(esp, ESP);
1272 PUSHQ(esp, compute_eflags());
1273 PUSHQ(esp, env->segs[R_CS].selector);
1274 PUSHQ(esp, old_eip);
1275 if (has_error_code) {
1276 PUSHQ(esp, error_code);
1277 }
1278
1279 if (new_stack) {
1280 ss = 0 | dpl;
1281 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1282 }
1283 ESP = esp;
1284
1285 selector = (selector & ~3) | dpl;
1286 cpu_x86_load_seg_cache(env, R_CS, selector,
1287 get_seg_base(e1, e2),
1288 get_seg_limit(e1, e2),
1289 e2);
1290 cpu_x86_set_cpl(env, dpl);
1291 env->eip = offset;
1292
1293 /* an interrupt gate clears the IF flag */
1294 if ((type & 1) == 0) {
1295 env->eflags &= ~IF_MASK;
1296 }
1297 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1298}
1299#endif
1300
1301#if defined(CONFIG_USER_ONLY)
1302void helper_syscall(int next_eip_addend)
1303{
1304 env->exception_index = EXCP_SYSCALL;
1305 env->exception_next_eip = env->eip + next_eip_addend;
1306 cpu_loop_exit();
1307}
1308#else
1309void helper_syscall(int next_eip_addend)
1310{
1311 int selector;
1312
1313 if (!(env->efer & MSR_EFER_SCE)) {
1314 raise_exception_err(EXCP06_ILLOP, 0);
1315 }
1316 selector = (env->star >> 32) & 0xffff;
1317#ifdef TARGET_X86_64
1318 if (env->hflags & HF_LMA_MASK) {
1319 int code64;
1320
1321 ECX = env->eip + next_eip_addend;
1322 env->regs[11] = compute_eflags();
1323
1324 code64 = env->hflags & HF_CS64_MASK;
1325
1326 cpu_x86_set_cpl(env, 0);
1327 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1328 0, 0xffffffff,
1329 DESC_G_MASK | DESC_P_MASK |
1330 DESC_S_MASK |
1331 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1332 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1333 0, 0xffffffff,
1334 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1335 DESC_S_MASK |
1336 DESC_W_MASK | DESC_A_MASK);
1337 env->eflags &= ~env->fmask;
1338 load_eflags(env->eflags, 0);
1339 if (code64)
1340 env->eip = env->lstar;
1341 else
1342 env->eip = env->cstar;
1343 } else
1344#endif
1345 {
1346 ECX = (uint32_t)(env->eip + next_eip_addend);
1347
1348 cpu_x86_set_cpl(env, 0);
1349 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1350 0, 0xffffffff,
1351 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1352 DESC_S_MASK |
1353 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1354 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1355 0, 0xffffffff,
1356 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1357 DESC_S_MASK |
1358 DESC_W_MASK | DESC_A_MASK);
1359 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1360 env->eip = (uint32_t)env->star;
1361 }
1362}
1363#endif
1364
1365void helper_sysret(int dflag)
1366{
1367 int cpl, selector;
1368
1369 if (!(env->efer & MSR_EFER_SCE)) {
1370 raise_exception_err(EXCP06_ILLOP, 0);
1371 }
1372 cpl = env->hflags & HF_CPL_MASK;
1373 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1374 raise_exception_err(EXCP0D_GPF, 0);
1375 }
1376 selector = (env->star >> 48) & 0xffff;
1377#ifdef TARGET_X86_64
1378 if (env->hflags & HF_LMA_MASK) {
1379 if (dflag == 2) {
1380 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1381 0, 0xffffffff,
1382 DESC_G_MASK | DESC_P_MASK |
1383 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1384 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1385 DESC_L_MASK);
1386 env->eip = ECX;
1387 } else {
1388 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1389 0, 0xffffffff,
1390 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1391 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1392 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1393 env->eip = (uint32_t)ECX;
1394 }
1395 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1396 0, 0xffffffff,
1397 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1398 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1399 DESC_W_MASK | DESC_A_MASK);
1400 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1401 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1402 cpu_x86_set_cpl(env, 3);
1403 } else
1404#endif
1405 {
1406 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1407 0, 0xffffffff,
1408 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1409 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1410 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1411 env->eip = (uint32_t)ECX;
1412 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1413 0, 0xffffffff,
1414 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1415 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1416 DESC_W_MASK | DESC_A_MASK);
1417 env->eflags |= IF_MASK;
1418 cpu_x86_set_cpl(env, 3);
1419 }
1420#ifdef USE_KQEMU
1421 if (kqemu_is_ok(env)) {
1422 if (env->hflags & HF_LMA_MASK)
1423 CC_OP = CC_OP_EFLAGS;
1424 env->exception_index = -1;
1425 cpu_loop_exit();
1426 }
1427#endif
1428}
1429
1430#ifdef VBOX
1431/**
1432 * Checks and processes external VMM events.
1433 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1434 */
1435void helper_external_event(void)
1436{
1437#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1438 uintptr_t uSP;
1439# ifdef RT_ARCH_AMD64
1440 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1441# else
1442 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1443# endif
1444 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1445#endif
1446 /* Keep in sync with flags checked by gen_check_external_event() */
1447 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1448 {
1449 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1450 ~CPU_INTERRUPT_EXTERNAL_HARD);
1451 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1452 }
1453 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1454 {
1455 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1456 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1457 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1458 }
1459 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1460 {
1461 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1462 ~CPU_INTERRUPT_EXTERNAL_DMA);
1463 remR3DmaRun(env);
1464 }
1465 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1466 {
1467 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1468 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1469 remR3TimersRun(env);
1470 }
1471}
1472/* helper for recording call instruction addresses for later scanning */
1473void helper_record_call()
1474{
1475 if ( !(env->state & CPU_RAW_RING0)
1476 && (env->cr[0] & CR0_PG_MASK)
1477 && !(env->eflags & X86_EFL_IF))
1478 remR3RecordCall(env);
1479}
1480#endif /* VBOX */
1481
1482/* real mode interrupt */
1483static void do_interrupt_real(int intno, int is_int, int error_code,
1484 unsigned int next_eip)
1485{
1486 SegmentCache *dt;
1487 target_ulong ptr, ssp;
1488 int selector;
1489 uint32_t offset, esp;
1490 uint32_t old_cs, old_eip;
1491
1492 /* real mode (simpler !) */
1493 dt = &env->idt;
1494#ifndef VBOX
1495 if (intno * 4 + 3 > dt->limit)
1496#else
1497 if ((unsigned)intno * 4 + 3 > dt->limit)
1498#endif
1499 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1500 ptr = dt->base + intno * 4;
1501 offset = lduw_kernel(ptr);
1502 selector = lduw_kernel(ptr + 2);
1503 esp = ESP;
1504 ssp = env->segs[R_SS].base;
1505 if (is_int)
1506 old_eip = next_eip;
1507 else
1508 old_eip = env->eip;
1509 old_cs = env->segs[R_CS].selector;
1510 /* XXX: use SS segment size ? */
1511 PUSHW(ssp, esp, 0xffff, compute_eflags());
1512 PUSHW(ssp, esp, 0xffff, old_cs);
1513 PUSHW(ssp, esp, 0xffff, old_eip);
1514
1515 /* update processor state */
1516 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1517 env->eip = offset;
1518 env->segs[R_CS].selector = selector;
1519 env->segs[R_CS].base = (selector << 4);
1520 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1521}
1522
1523/* fake user mode interrupt */
1524void do_interrupt_user(int intno, int is_int, int error_code,
1525 target_ulong next_eip)
1526{
1527 SegmentCache *dt;
1528 target_ulong ptr;
1529 int dpl, cpl, shift;
1530 uint32_t e2;
1531
1532 dt = &env->idt;
1533 if (env->hflags & HF_LMA_MASK) {
1534 shift = 4;
1535 } else {
1536 shift = 3;
1537 }
1538 ptr = dt->base + (intno << shift);
1539 e2 = ldl_kernel(ptr + 4);
1540
1541 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1542 cpl = env->hflags & HF_CPL_MASK;
1543 /* check privilege if software int */
1544 if (is_int && dpl < cpl)
1545 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1546
1547 /* Since we emulate only user space, we cannot do more than
1548 exit the emulation with the suitable exception and error
1549 code */
1550 if (is_int)
1551 EIP = next_eip;
1552}
1553
1554/*
1555 * Begin execution of an interrupt. is_int is TRUE if coming from
1556 * the int instruction. next_eip is the EIP value AFTER the interrupt
1557 * instruction. It is only relevant if is_int is TRUE.
1558 */
1559void do_interrupt(int intno, int is_int, int error_code,
1560 target_ulong next_eip, int is_hw)
1561{
1562 if (loglevel & CPU_LOG_INT) {
1563 if ((env->cr[0] & CR0_PE_MASK)) {
1564 static int count;
1565 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1566 count, intno, error_code, is_int,
1567 env->hflags & HF_CPL_MASK,
1568 env->segs[R_CS].selector, EIP,
1569 (int)env->segs[R_CS].base + EIP,
1570 env->segs[R_SS].selector, ESP);
1571 if (intno == 0x0e) {
1572 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1573 } else {
1574 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1575 }
1576 fprintf(logfile, "\n");
1577 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1578#if 0
1579 {
1580 int i;
1581 uint8_t *ptr;
1582 fprintf(logfile, " code=");
1583 ptr = env->segs[R_CS].base + env->eip;
1584 for(i = 0; i < 16; i++) {
1585 fprintf(logfile, " %02x", ldub(ptr + i));
1586 }
1587 fprintf(logfile, "\n");
1588 }
1589#endif
1590 count++;
1591 }
1592 }
1593 if (env->cr[0] & CR0_PE_MASK) {
1594#ifdef TARGET_X86_64
1595 if (env->hflags & HF_LMA_MASK) {
1596 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1597 } else
1598#endif
1599 {
1600#ifdef VBOX
1601 /* int xx *, v86 code and VME enabled? */
1602 if ( (env->eflags & VM_MASK)
1603 && (env->cr[4] & CR4_VME_MASK)
1604 && is_int
1605 && !is_hw
1606 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1607 )
1608 do_soft_interrupt_vme(intno, error_code, next_eip);
1609 else
1610#endif /* VBOX */
1611 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1612 }
1613 } else {
1614 do_interrupt_real(intno, is_int, error_code, next_eip);
1615 }
1616}
1617
1618/*
1619 * Check nested exceptions and change to double or triple fault if
1620 * needed. It should only be called if this is not an interrupt.
1621 * Returns the new exception number.
1622 */
1623static int check_exception(int intno, int *error_code)
1624{
1625 int first_contributory = env->old_exception == 0 ||
1626 (env->old_exception >= 10 &&
1627 env->old_exception <= 13);
1628 int second_contributory = intno == 0 ||
1629 (intno >= 10 && intno <= 13);
1630
1631 if (loglevel & CPU_LOG_INT)
1632 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1633 env->old_exception, intno);
1634
1635 if (env->old_exception == EXCP08_DBLE)
1636 cpu_abort(env, "triple fault");
1637
1638 if ((first_contributory && second_contributory)
1639 || (env->old_exception == EXCP0E_PAGE &&
1640 (second_contributory || (intno == EXCP0E_PAGE)))) {
1641 intno = EXCP08_DBLE;
1642 *error_code = 0;
1643 }
1644
1645 if (second_contributory || (intno == EXCP0E_PAGE) ||
1646 (intno == EXCP08_DBLE))
1647 env->old_exception = intno;
1648
1649 return intno;
1650}
1651
1652/*
1653 * Signal an interrupt. It is executed in the main CPU loop.
1654 * is_int is TRUE if coming from the int instruction. next_eip is the
1655 * EIP value AFTER the interrupt instruction. It is only relevant if
1656 * is_int is TRUE.
1657 */
1658void raise_interrupt(int intno, int is_int, int error_code,
1659 int next_eip_addend)
1660{
1661#if defined(VBOX) && defined(DEBUG)
1662 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1663#endif
1664 if (!is_int) {
1665 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1666 intno = check_exception(intno, &error_code);
1667 } else {
1668 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1669 }
1670
1671 env->exception_index = intno;
1672 env->error_code = error_code;
1673 env->exception_is_int = is_int;
1674 env->exception_next_eip = env->eip + next_eip_addend;
1675 cpu_loop_exit();
1676}
1677
1678/* shortcuts to generate exceptions */
1679
1680void (raise_exception_err)(int exception_index, int error_code)
1681{
1682 raise_interrupt(exception_index, 0, error_code, 0);
1683}
1684
1685void raise_exception(int exception_index)
1686{
1687 raise_interrupt(exception_index, 0, 0, 0);
1688}
1689
1690/* SMM support */
1691
1692#if defined(CONFIG_USER_ONLY)
1693
1694void do_smm_enter(void)
1695{
1696}
1697
1698void helper_rsm(void)
1699{
1700}
1701
1702#else
1703
1704#ifdef TARGET_X86_64
1705#define SMM_REVISION_ID 0x00020064
1706#else
1707#define SMM_REVISION_ID 0x00020000
1708#endif
1709
1710void do_smm_enter(void)
1711{
1712 target_ulong sm_state;
1713 SegmentCache *dt;
1714 int i, offset;
1715
1716 if (loglevel & CPU_LOG_INT) {
1717 fprintf(logfile, "SMM: enter\n");
1718 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1719 }
1720
1721 env->hflags |= HF_SMM_MASK;
1722 cpu_smm_update(env);
1723
1724 sm_state = env->smbase + 0x8000;
1725
1726#ifdef TARGET_X86_64
1727 for(i = 0; i < 6; i++) {
1728 dt = &env->segs[i];
1729 offset = 0x7e00 + i * 16;
1730 stw_phys(sm_state + offset, dt->selector);
1731 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1732 stl_phys(sm_state + offset + 4, dt->limit);
1733 stq_phys(sm_state + offset + 8, dt->base);
1734 }
1735
1736 stq_phys(sm_state + 0x7e68, env->gdt.base);
1737 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1738
1739 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1740 stq_phys(sm_state + 0x7e78, env->ldt.base);
1741 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1742 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1743
1744 stq_phys(sm_state + 0x7e88, env->idt.base);
1745 stl_phys(sm_state + 0x7e84, env->idt.limit);
1746
1747 stw_phys(sm_state + 0x7e90, env->tr.selector);
1748 stq_phys(sm_state + 0x7e98, env->tr.base);
1749 stl_phys(sm_state + 0x7e94, env->tr.limit);
1750 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1751
1752 stq_phys(sm_state + 0x7ed0, env->efer);
1753
1754 stq_phys(sm_state + 0x7ff8, EAX);
1755 stq_phys(sm_state + 0x7ff0, ECX);
1756 stq_phys(sm_state + 0x7fe8, EDX);
1757 stq_phys(sm_state + 0x7fe0, EBX);
1758 stq_phys(sm_state + 0x7fd8, ESP);
1759 stq_phys(sm_state + 0x7fd0, EBP);
1760 stq_phys(sm_state + 0x7fc8, ESI);
1761 stq_phys(sm_state + 0x7fc0, EDI);
1762 for(i = 8; i < 16; i++)
1763 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1764 stq_phys(sm_state + 0x7f78, env->eip);
1765 stl_phys(sm_state + 0x7f70, compute_eflags());
1766 stl_phys(sm_state + 0x7f68, env->dr[6]);
1767 stl_phys(sm_state + 0x7f60, env->dr[7]);
1768
1769 stl_phys(sm_state + 0x7f48, env->cr[4]);
1770 stl_phys(sm_state + 0x7f50, env->cr[3]);
1771 stl_phys(sm_state + 0x7f58, env->cr[0]);
1772
1773 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1774 stl_phys(sm_state + 0x7f00, env->smbase);
1775#else
1776 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1777 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1778 stl_phys(sm_state + 0x7ff4, compute_eflags());
1779 stl_phys(sm_state + 0x7ff0, env->eip);
1780 stl_phys(sm_state + 0x7fec, EDI);
1781 stl_phys(sm_state + 0x7fe8, ESI);
1782 stl_phys(sm_state + 0x7fe4, EBP);
1783 stl_phys(sm_state + 0x7fe0, ESP);
1784 stl_phys(sm_state + 0x7fdc, EBX);
1785 stl_phys(sm_state + 0x7fd8, EDX);
1786 stl_phys(sm_state + 0x7fd4, ECX);
1787 stl_phys(sm_state + 0x7fd0, EAX);
1788 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1789 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1790
1791 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1792 stl_phys(sm_state + 0x7f64, env->tr.base);
1793 stl_phys(sm_state + 0x7f60, env->tr.limit);
1794 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1795
1796 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1797 stl_phys(sm_state + 0x7f80, env->ldt.base);
1798 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1799 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1800
1801 stl_phys(sm_state + 0x7f74, env->gdt.base);
1802 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1803
1804 stl_phys(sm_state + 0x7f58, env->idt.base);
1805 stl_phys(sm_state + 0x7f54, env->idt.limit);
1806
1807 for(i = 0; i < 6; i++) {
1808 dt = &env->segs[i];
1809 if (i < 3)
1810 offset = 0x7f84 + i * 12;
1811 else
1812 offset = 0x7f2c + (i - 3) * 12;
1813 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1814 stl_phys(sm_state + offset + 8, dt->base);
1815 stl_phys(sm_state + offset + 4, dt->limit);
1816 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1817 }
1818 stl_phys(sm_state + 0x7f14, env->cr[4]);
1819
1820 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1821 stl_phys(sm_state + 0x7ef8, env->smbase);
1822#endif
1823 /* init SMM cpu state */
1824
1825#ifdef TARGET_X86_64
1826 cpu_load_efer(env, 0);
1827#endif
1828 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1829 env->eip = 0x00008000;
1830 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1831 0xffffffff, 0);
1832 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1833 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1834 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1835 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1836 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1837
1838 cpu_x86_update_cr0(env,
1839 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1840 cpu_x86_update_cr4(env, 0);
1841 env->dr[7] = 0x00000400;
1842 CC_OP = CC_OP_EFLAGS;
1843}
1844
1845void helper_rsm(void)
1846{
1847#ifdef VBOX
1848 cpu_abort(env, "helper_rsm");
1849#else /* !VBOX */
1850
1851
1852 target_ulong sm_state;
1853 int i, offset;
1854 uint32_t val;
1855
1856 sm_state = env->smbase + 0x8000;
1857#ifdef TARGET_X86_64
1858 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1859
1860 for(i = 0; i < 6; i++) {
1861 offset = 0x7e00 + i * 16;
1862 cpu_x86_load_seg_cache(env, i,
1863 lduw_phys(sm_state + offset),
1864 ldq_phys(sm_state + offset + 8),
1865 ldl_phys(sm_state + offset + 4),
1866 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1867 }
1868
1869 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1870 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1871
1872 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1873 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1874 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1875 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1876
1877 env->idt.base = ldq_phys(sm_state + 0x7e88);
1878 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1879
1880 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1881 env->tr.base = ldq_phys(sm_state + 0x7e98);
1882 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1883 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1884
1885 EAX = ldq_phys(sm_state + 0x7ff8);
1886 ECX = ldq_phys(sm_state + 0x7ff0);
1887 EDX = ldq_phys(sm_state + 0x7fe8);
1888 EBX = ldq_phys(sm_state + 0x7fe0);
1889 ESP = ldq_phys(sm_state + 0x7fd8);
1890 EBP = ldq_phys(sm_state + 0x7fd0);
1891 ESI = ldq_phys(sm_state + 0x7fc8);
1892 EDI = ldq_phys(sm_state + 0x7fc0);
1893 for(i = 8; i < 16; i++)
1894 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1895 env->eip = ldq_phys(sm_state + 0x7f78);
1896 load_eflags(ldl_phys(sm_state + 0x7f70),
1897 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1898 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1899 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1900
1901 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1902 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1903 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1904
1905 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1906 if (val & 0x20000) {
1907 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1908 }
1909#else
1910 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1911 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1912 load_eflags(ldl_phys(sm_state + 0x7ff4),
1913 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1914 env->eip = ldl_phys(sm_state + 0x7ff0);
1915 EDI = ldl_phys(sm_state + 0x7fec);
1916 ESI = ldl_phys(sm_state + 0x7fe8);
1917 EBP = ldl_phys(sm_state + 0x7fe4);
1918 ESP = ldl_phys(sm_state + 0x7fe0);
1919 EBX = ldl_phys(sm_state + 0x7fdc);
1920 EDX = ldl_phys(sm_state + 0x7fd8);
1921 ECX = ldl_phys(sm_state + 0x7fd4);
1922 EAX = ldl_phys(sm_state + 0x7fd0);
1923 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1924 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1925
1926 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1927 env->tr.base = ldl_phys(sm_state + 0x7f64);
1928 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1929 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1930
1931 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1932 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1933 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1934 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1935
1936 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1937 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1938
1939 env->idt.base = ldl_phys(sm_state + 0x7f58);
1940 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1941
1942 for(i = 0; i < 6; i++) {
1943 if (i < 3)
1944 offset = 0x7f84 + i * 12;
1945 else
1946 offset = 0x7f2c + (i - 3) * 12;
1947 cpu_x86_load_seg_cache(env, i,
1948 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1949 ldl_phys(sm_state + offset + 8),
1950 ldl_phys(sm_state + offset + 4),
1951 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1952 }
1953 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1954
1955 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1956 if (val & 0x20000) {
1957 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1958 }
1959#endif
1960 CC_OP = CC_OP_EFLAGS;
1961 env->hflags &= ~HF_SMM_MASK;
1962 cpu_smm_update(env);
1963
1964 if (loglevel & CPU_LOG_INT) {
1965 fprintf(logfile, "SMM: after RSM\n");
1966 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1967 }
1968#endif /* !VBOX */
1969}
1970
1971#endif /* !CONFIG_USER_ONLY */
1972
1973
1974/* division, flags are undefined */
1975
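/* Editor's note (summary, not part of the original source): the divide
 * helpers below share one pattern.  The dividend is assembled from EAX
 * (plus EDX for the 16/32-bit forms), EXCP00_DIVZ (#DE) is raised when the
 * divisor is zero or the quotient does not fit in the destination, and
 * quotient and remainder are written back to the architectural registers.
 * For example, DIV r/m8 with AX = 0x0123 and a divisor of 0x10 leaves
 * AL = 0x12 (quotient) and AH = 0x03 (remainder). */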
1976void helper_divb_AL(target_ulong t0)
1977{
1978 unsigned int num, den, q, r;
1979
1980 num = (EAX & 0xffff);
1981 den = (t0 & 0xff);
1982 if (den == 0) {
1983 raise_exception(EXCP00_DIVZ);
1984 }
1985 q = (num / den);
1986 if (q > 0xff)
1987 raise_exception(EXCP00_DIVZ);
1988 q &= 0xff;
1989 r = (num % den) & 0xff;
1990 EAX = (EAX & ~0xffff) | (r << 8) | q;
1991}
1992
1993void helper_idivb_AL(target_ulong t0)
1994{
1995 int num, den, q, r;
1996
1997 num = (int16_t)EAX;
1998 den = (int8_t)t0;
1999 if (den == 0) {
2000 raise_exception(EXCP00_DIVZ);
2001 }
2002 q = (num / den);
2003 if (q != (int8_t)q)
2004 raise_exception(EXCP00_DIVZ);
2005 q &= 0xff;
2006 r = (num % den) & 0xff;
2007 EAX = (EAX & ~0xffff) | (r << 8) | q;
2008}
2009
2010void helper_divw_AX(target_ulong t0)
2011{
2012 unsigned int num, den, q, r;
2013
2014 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2015 den = (t0 & 0xffff);
2016 if (den == 0) {
2017 raise_exception(EXCP00_DIVZ);
2018 }
2019 q = (num / den);
2020 if (q > 0xffff)
2021 raise_exception(EXCP00_DIVZ);
2022 q &= 0xffff;
2023 r = (num % den) & 0xffff;
2024 EAX = (EAX & ~0xffff) | q;
2025 EDX = (EDX & ~0xffff) | r;
2026}
2027
2028void helper_idivw_AX(target_ulong t0)
2029{
2030 int num, den, q, r;
2031
2032 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2033 den = (int16_t)t0;
2034 if (den == 0) {
2035 raise_exception(EXCP00_DIVZ);
2036 }
2037 q = (num / den);
2038 if (q != (int16_t)q)
2039 raise_exception(EXCP00_DIVZ);
2040 q &= 0xffff;
2041 r = (num % den) & 0xffff;
2042 EAX = (EAX & ~0xffff) | q;
2043 EDX = (EDX & ~0xffff) | r;
2044}
2045
2046void helper_divl_EAX(target_ulong t0)
2047{
2048 unsigned int den, r;
2049 uint64_t num, q;
2050
2051 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2052 den = t0;
2053 if (den == 0) {
2054 raise_exception(EXCP00_DIVZ);
2055 }
2056 q = (num / den);
2057 r = (num % den);
2058 if (q > 0xffffffff)
2059 raise_exception(EXCP00_DIVZ);
2060 EAX = (uint32_t)q;
2061 EDX = (uint32_t)r;
2062}
2063
2064void helper_idivl_EAX(target_ulong t0)
2065{
2066 int den, r;
2067 int64_t num, q;
2068
2069 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2070 den = t0;
2071 if (den == 0) {
2072 raise_exception(EXCP00_DIVZ);
2073 }
2074 q = (num / den);
2075 r = (num % den);
2076 if (q != (int32_t)q)
2077 raise_exception(EXCP00_DIVZ);
2078 EAX = (uint32_t)q;
2079 EDX = (uint32_t)r;
2080}
2081
2082/* bcd */
2083
2084/* XXX: exception */
2085void helper_aam(int base)
2086{
2087 int al, ah;
2088 al = EAX & 0xff;
2089 ah = al / base;
2090 al = al % base;
2091 EAX = (EAX & ~0xffff) | al | (ah << 8);
2092 CC_DST = al;
2093}
2094
2095void helper_aad(int base)
2096{
2097 int al, ah;
2098 al = EAX & 0xff;
2099 ah = (EAX >> 8) & 0xff;
2100 al = ((ah * base) + al) & 0xff;
2101 EAX = (EAX & ~0xffff) | al;
2102 CC_DST = al;
2103}
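/* Editor's note (illustrative, not part of the original source): AAM splits
 * AL into two unpacked BCD digits and AAD recombines them.  With the default
 * base of 10, AAM on AL = 0x37 (55 decimal) leaves AH = 5, AL = 5; AAD on
 * AH = 5, AL = 5 restores AL = 55 = 0x37.  Only SF, ZF and PF are defined
 * afterwards, which is why the helpers simply record AL in CC_DST. */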
2104
2105void helper_aaa(void)
2106{
2107 int icarry;
2108 int al, ah, af;
2109 int eflags;
2110
2111 eflags = cc_table[CC_OP].compute_all();
2112 af = eflags & CC_A;
2113 al = EAX & 0xff;
2114 ah = (EAX >> 8) & 0xff;
2115
2116 icarry = (al > 0xf9);
2117 if (((al & 0x0f) > 9 ) || af) {
2118 al = (al + 6) & 0x0f;
2119 ah = (ah + 1 + icarry) & 0xff;
2120 eflags |= CC_C | CC_A;
2121 } else {
2122 eflags &= ~(CC_C | CC_A);
2123 al &= 0x0f;
2124 }
2125 EAX = (EAX & ~0xffff) | al | (ah << 8);
2126 CC_SRC = eflags;
2127 FORCE_RET();
2128}
2129
2130void helper_aas(void)
2131{
2132 int icarry;
2133 int al, ah, af;
2134 int eflags;
2135
2136 eflags = cc_table[CC_OP].compute_all();
2137 af = eflags & CC_A;
2138 al = EAX & 0xff;
2139 ah = (EAX >> 8) & 0xff;
2140
2141 icarry = (al < 6);
2142 if (((al & 0x0f) > 9 ) || af) {
2143 al = (al - 6) & 0x0f;
2144 ah = (ah - 1 - icarry) & 0xff;
2145 eflags |= CC_C | CC_A;
2146 } else {
2147 eflags &= ~(CC_C | CC_A);
2148 al &= 0x0f;
2149 }
2150 EAX = (EAX & ~0xffff) | al | (ah << 8);
2151 CC_SRC = eflags;
2152 FORCE_RET();
2153}
2154
2155void helper_daa(void)
2156{
2157 int al, af, cf;
2158 int eflags;
2159
2160 eflags = cc_table[CC_OP].compute_all();
2161 cf = eflags & CC_C;
2162 af = eflags & CC_A;
2163 al = EAX & 0xff;
2164
2165 eflags = 0;
2166 if (((al & 0x0f) > 9 ) || af) {
2167 al = (al + 6) & 0xff;
2168 eflags |= CC_A;
2169 }
2170 if ((al > 0x9f) || cf) {
2171 al = (al + 0x60) & 0xff;
2172 eflags |= CC_C;
2173 }
2174 EAX = (EAX & ~0xff) | al;
2175 /* well, speed is not an issue here, so we compute the flags by hand */
2176 eflags |= (al == 0) << 6; /* zf */
2177 eflags |= parity_table[al]; /* pf */
2178 eflags |= (al & 0x80); /* sf */
2179 CC_SRC = eflags;
2180 FORCE_RET();
2181}
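/* Editor's note (illustrative, not part of the original source): DAA
 * corrects AL after adding two packed BCD operands.  E.g. 0x19 + 0x28 gives
 * AL = 0x41 with AF set; DAA then adds 6, yielding AL = 0x47, the packed BCD
 * encoding of 19 + 28 = 47.  helper_das below performs the analogous
 * correction after a subtraction. */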
2182
2183void helper_das(void)
2184{
2185 int al, al1, af, cf;
2186 int eflags;
2187
2188 eflags = cc_table[CC_OP].compute_all();
2189 cf = eflags & CC_C;
2190 af = eflags & CC_A;
2191 al = EAX & 0xff;
2192
2193 eflags = 0;
2194 al1 = al;
2195 if (((al & 0x0f) > 9 ) || af) {
2196 eflags |= CC_A;
2197 if (al < 6 || cf)
2198 eflags |= CC_C;
2199 al = (al - 6) & 0xff;
2200 }
2201 if ((al1 > 0x99) || cf) {
2202 al = (al - 0x60) & 0xff;
2203 eflags |= CC_C;
2204 }
2205 EAX = (EAX & ~0xff) | al;
2206 /* well, speed is not an issue here, so we compute the flags by hand */
2207 eflags |= (al == 0) << 6; /* zf */
2208 eflags |= parity_table[al]; /* pf */
2209 eflags |= (al & 0x80); /* sf */
2210 CC_SRC = eflags;
2211 FORCE_RET();
2212}
2213
2214void helper_into(int next_eip_addend)
2215{
2216 int eflags;
2217 eflags = cc_table[CC_OP].compute_all();
2218 if (eflags & CC_O) {
2219 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2220 }
2221}
2222
2223void helper_cmpxchg8b(target_ulong a0)
2224{
2225 uint64_t d;
2226 int eflags;
2227
2228 eflags = cc_table[CC_OP].compute_all();
2229 d = ldq(a0);
2230 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2231 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2232 eflags |= CC_Z;
2233 } else {
2234 /* always do the store */
2235 stq(a0, d);
2236 EDX = (uint32_t)(d >> 32);
2237 EAX = (uint32_t)d;
2238 eflags &= ~CC_Z;
2239 }
2240 CC_SRC = eflags;
2241}
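/* Editor's note (not part of the original source): CMPXCHG8B compares the
 * 64-bit value at [a0] with EDX:EAX.  On a match it stores ECX:EBX and sets
 * ZF; otherwise it loads the memory operand into EDX:EAX and clears ZF.  The
 * store in the mismatch path is deliberate: the instruction always performs
 * a write cycle, so a read-only page faults in either case. */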
2242
2243#ifdef TARGET_X86_64
2244void helper_cmpxchg16b(target_ulong a0)
2245{
2246 uint64_t d0, d1;
2247 int eflags;
2248
2249 if ((a0 & 0xf) != 0)
2250 raise_exception(EXCP0D_GPF);
2251 eflags = cc_table[CC_OP].compute_all();
2252 d0 = ldq(a0);
2253 d1 = ldq(a0 + 8);
2254 if (d0 == EAX && d1 == EDX) {
2255 stq(a0, EBX);
2256 stq(a0 + 8, ECX);
2257 eflags |= CC_Z;
2258 } else {
2259 /* always do the store */
2260 stq(a0, d0);
2261 stq(a0 + 8, d1);
2262 EDX = d1;
2263 EAX = d0;
2264 eflags &= ~CC_Z;
2265 }
2266 CC_SRC = eflags;
2267}
2268#endif
2269
2270void helper_single_step(void)
2271{
2272 env->dr[6] |= 0x4000;
2273 raise_exception(EXCP01_SSTP);
2274}
2275
2276void helper_cpuid(void)
2277{
2278#ifndef VBOX
2279 uint32_t index;
2280
2281 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2282
2283 index = (uint32_t)EAX;
2284 /* test if maximum index reached */
2285 if (index & 0x80000000) {
2286 if (index > env->cpuid_xlevel)
2287 index = env->cpuid_level;
2288 } else {
2289 if (index > env->cpuid_level)
2290 index = env->cpuid_level;
2291 }
2292
2293 switch(index) {
2294 case 0:
2295 EAX = env->cpuid_level;
2296 EBX = env->cpuid_vendor1;
2297 EDX = env->cpuid_vendor2;
2298 ECX = env->cpuid_vendor3;
2299 break;
2300 case 1:
2301 EAX = env->cpuid_version;
2302 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2303 ECX = env->cpuid_ext_features;
2304 EDX = env->cpuid_features;
2305 break;
2306 case 2:
2307 /* cache info: needed for Pentium Pro compatibility */
2308 EAX = 1;
2309 EBX = 0;
2310 ECX = 0;
2311 EDX = 0x2c307d;
2312 break;
2313 case 4:
2314 /* cache info: needed for Core compatibility */
2315 switch (ECX) {
2316 case 0: /* L1 dcache info */
2317 EAX = 0x0000121;
2318 EBX = 0x1c0003f;
2319 ECX = 0x000003f;
2320 EDX = 0x0000001;
2321 break;
2322 case 1: /* L1 icache info */
2323 EAX = 0x0000122;
2324 EBX = 0x1c0003f;
2325 ECX = 0x000003f;
2326 EDX = 0x0000001;
2327 break;
2328 case 2: /* L2 cache info */
2329 EAX = 0x0000143;
2330 EBX = 0x3c0003f;
2331 ECX = 0x0000fff;
2332 EDX = 0x0000001;
2333 break;
2334 default: /* end of info */
2335 EAX = 0;
2336 EBX = 0;
2337 ECX = 0;
2338 EDX = 0;
2339 break;
2340 }
2341
2342 break;
2343 case 5:
2344 /* mwait info: needed for Core compatibility */
2345 EAX = 0; /* Smallest monitor-line size in bytes */
2346 EBX = 0; /* Largest monitor-line size in bytes */
2347 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2348 EDX = 0;
2349 break;
2350 case 6:
2351 /* Thermal and Power Leaf */
2352 EAX = 0;
2353 EBX = 0;
2354 ECX = 0;
2355 EDX = 0;
2356 break;
2357 case 9:
2358 /* Direct Cache Access Information Leaf */
2359 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
2360 EBX = 0;
2361 ECX = 0;
2362 EDX = 0;
2363 break;
2364 case 0xA:
2365 /* Architectural Performance Monitoring Leaf */
2366 EAX = 0;
2367 EBX = 0;
2368 ECX = 0;
2369 EDX = 0;
2370 break;
2371 case 0x80000000:
2372 EAX = env->cpuid_xlevel;
2373 EBX = env->cpuid_vendor1;
2374 EDX = env->cpuid_vendor2;
2375 ECX = env->cpuid_vendor3;
2376 break;
2377 case 0x80000001:
2378 EAX = env->cpuid_features;
2379 EBX = 0;
2380 ECX = env->cpuid_ext3_features;
2381 EDX = env->cpuid_ext2_features;
2382 break;
2383 case 0x80000002:
2384 case 0x80000003:
2385 case 0x80000004:
2386 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2387 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2388 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2389 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2390 break;
2391 case 0x80000005:
2392 /* cache info (L1 cache) */
2393 EAX = 0x01ff01ff;
2394 EBX = 0x01ff01ff;
2395 ECX = 0x40020140;
2396 EDX = 0x40020140;
2397 break;
2398 case 0x80000006:
2399 /* cache info (L2 cache) */
2400 EAX = 0;
2401 EBX = 0x42004200;
2402 ECX = 0x02008140;
2403 EDX = 0;
2404 break;
2405 case 0x80000008:
2406 /* virtual & phys address size in low 2 bytes. */
2407/* XXX: This value must match the one used in the MMU code. */
2408 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2409 /* 64 bit processor */
2410#if defined(USE_KQEMU)
2411 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2412#else
2413/* XXX: The physical address space is limited to 42 bits in exec.c. */
2414 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2415#endif
2416 } else {
2417#if defined(USE_KQEMU)
2418 EAX = 0x00000020; /* 32 bits physical */
2419#else
2420 if (env->cpuid_features & CPUID_PSE36)
2421 EAX = 0x00000024; /* 36 bits physical */
2422 else
2423 EAX = 0x00000020; /* 32 bits physical */
2424#endif
2425 }
2426 EBX = 0;
2427 ECX = 0;
2428 EDX = 0;
2429 break;
2430 case 0x8000000A:
2431 EAX = 0x00000001;
2432 EBX = 0;
2433 ECX = 0;
2434 EDX = 0;
2435 break;
2436 default:
2437 /* reserved values: zero */
2438 EAX = 0;
2439 EBX = 0;
2440 ECX = 0;
2441 EDX = 0;
2442 break;
2443 }
2444#else /* VBOX */
2445 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
2446#endif /* VBOX */
2447}
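/* Editor's note (not part of the original source): in VBOX builds the leaf
 * table above is compiled out and every CPUID query is answered by the VMM
 * through remR3CpuId(), so the guest sees the values configured for the VM
 * rather than QEMU's built-in defaults. */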
2448
2449void helper_enter_level(int level, int data32, target_ulong t1)
2450{
2451 target_ulong ssp;
2452 uint32_t esp_mask, esp, ebp;
2453
2454 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2455 ssp = env->segs[R_SS].base;
2456 ebp = EBP;
2457 esp = ESP;
2458 if (data32) {
2459 /* 32 bit */
2460 esp -= 4;
2461 while (--level) {
2462 esp -= 4;
2463 ebp -= 4;
2464 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2465 }
2466 esp -= 4;
2467 stl(ssp + (esp & esp_mask), t1);
2468 } else {
2469 /* 16 bit */
2470 esp -= 2;
2471 while (--level) {
2472 esp -= 2;
2473 ebp -= 2;
2474 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2475 }
2476 esp -= 2;
2477 stw(ssp + (esp & esp_mask), t1);
2478 }
2479}
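/* Editor's note (not part of the original source): helper_enter_level
 * implements only the nesting part of ENTER.  For level > 1 it copies
 * level-1 saved frame pointers from the old frame (addressed through EBP)
 * onto the new stack and then pushes the new frame pointer t1; pushing the
 * old EBP and the final EBP/ESP update are assumed to be done by the
 * translated ENTER sequence that calls this helper. */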
2480
2481#ifdef TARGET_X86_64
2482void helper_enter64_level(int level, int data64, target_ulong t1)
2483{
2484 target_ulong esp, ebp;
2485 ebp = EBP;
2486 esp = ESP;
2487
2488 if (data64) {
2489 /* 64 bit */
2490 esp -= 8;
2491 while (--level) {
2492 esp -= 8;
2493 ebp -= 8;
2494 stq(esp, ldq(ebp));
2495 }
2496 esp -= 8;
2497 stq(esp, t1);
2498 } else {
2499 /* 16 bit */
2500 esp -= 2;
2501 while (--level) {
2502 esp -= 2;
2503 ebp -= 2;
2504 stw(esp, lduw(ebp));
2505 }
2506 esp -= 2;
2507 stw(esp, t1);
2508 }
2509}
2510#endif
2511
2512void helper_lldt(int selector)
2513{
2514 SegmentCache *dt;
2515 uint32_t e1, e2;
2516#ifndef VBOX
2517 int index, entry_limit;
2518#else
2519 unsigned int index, entry_limit;
2520#endif
2521 target_ulong ptr;
2522
2523#ifdef VBOX
2524 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2525 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2526#endif
2527
2528 selector &= 0xffff;
2529 if ((selector & 0xfffc) == 0) {
2530 /* XXX: NULL selector case: invalid LDT */
2531 env->ldt.base = 0;
2532 env->ldt.limit = 0;
2533 } else {
2534 if (selector & 0x4)
2535 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2536 dt = &env->gdt;
2537 index = selector & ~7;
2538#ifdef TARGET_X86_64
2539 if (env->hflags & HF_LMA_MASK)
2540 entry_limit = 15;
2541 else
2542#endif
2543 entry_limit = 7;
2544 if ((index + entry_limit) > dt->limit)
2545 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2546 ptr = dt->base + index;
2547 e1 = ldl_kernel(ptr);
2548 e2 = ldl_kernel(ptr + 4);
2549 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2550 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2551 if (!(e2 & DESC_P_MASK))
2552 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2553#ifdef TARGET_X86_64
2554 if (env->hflags & HF_LMA_MASK) {
2555 uint32_t e3;
2556 e3 = ldl_kernel(ptr + 8);
2557 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2558 env->ldt.base |= (target_ulong)e3 << 32;
2559 } else
2560#endif
2561 {
2562 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2563 }
2564 }
2565 env->ldt.selector = selector;
2566#ifdef VBOX
2567 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2568 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2569#endif
2570}
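/* Editor's note (summary, not part of the original source): LLDT accepts a
 * null selector, which simply invalidates LDTR.  Otherwise the selector must
 * reference the GDT (TI bit clear) and the descriptor must be a present
 * system descriptor of type 2 (LDT); in long mode the 16-byte descriptor
 * format is used (entry_limit 15) so the upper base bits come from the third
 * dword.  helper_ltr below follows the same pattern but requires an
 * available TSS (type 1 or 9) and marks it busy via DESC_TSS_BUSY_MASK. */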
2571
2572void helper_ltr(int selector)
2573{
2574 SegmentCache *dt;
2575 uint32_t e1, e2;
2576#ifndef VBOX
2577 int index, type, entry_limit;
2578#else
2579 unsigned int index;
2580 int type, entry_limit;
2581#endif
2582 target_ulong ptr;
2583
2584#ifdef VBOX
2585 Log(("helper_ltr: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2586 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2587 env->tr.flags, (RTSEL)(selector & 0xffff)));
2588#endif
2589 selector &= 0xffff;
2590 if ((selector & 0xfffc) == 0) {
2591 /* NULL selector case: invalid TR */
2592 env->tr.base = 0;
2593 env->tr.limit = 0;
2594 env->tr.flags = 0;
2595 } else {
2596 if (selector & 0x4)
2597 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2598 dt = &env->gdt;
2599 index = selector & ~7;
2600#ifdef TARGET_X86_64
2601 if (env->hflags & HF_LMA_MASK)
2602 entry_limit = 15;
2603 else
2604#endif
2605 entry_limit = 7;
2606 if ((index + entry_limit) > dt->limit)
2607 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2608 ptr = dt->base + index;
2609 e1 = ldl_kernel(ptr);
2610 e2 = ldl_kernel(ptr + 4);
2611 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2612 if ((e2 & DESC_S_MASK) ||
2613 (type != 1 && type != 9))
2614 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2615 if (!(e2 & DESC_P_MASK))
2616 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2617#ifdef TARGET_X86_64
2618 if (env->hflags & HF_LMA_MASK) {
2619 uint32_t e3, e4;
2620 e3 = ldl_kernel(ptr + 8);
2621 e4 = ldl_kernel(ptr + 12);
2622 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2623 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2624 load_seg_cache_raw_dt(&env->tr, e1, e2);
2625 env->tr.base |= (target_ulong)e3 << 32;
2626 } else
2627#endif
2628 {
2629 load_seg_cache_raw_dt(&env->tr, e1, e2);
2630 }
2631 e2 |= DESC_TSS_BUSY_MASK;
2632 stl_kernel(ptr + 4, e2);
2633 }
2634 env->tr.selector = selector;
2635#ifdef VBOX
2636 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2637 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2638 env->tr.flags, (RTSEL)(selector & 0xffff)));
2639#endif
2640}
2641
2642/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2643void helper_load_seg(int seg_reg, int selector)
2644{
2645 uint32_t e1, e2;
2646 int cpl, dpl, rpl;
2647 SegmentCache *dt;
2648#ifndef VBOX
2649 int index;
2650#else
2651 unsigned int index;
2652#endif
2653 target_ulong ptr;
2654
2655 selector &= 0xffff;
2656 cpl = env->hflags & HF_CPL_MASK;
2657
2658#ifdef VBOX
2659 /* Trying to load a selector with RPL=1 while in raw ring 0? */
2660 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2661 {
2662 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2663 selector = selector & 0xfffc;
2664 }
2665#endif
2666 if ((selector & 0xfffc) == 0) {
2667 /* null selector case */
2668 if (seg_reg == R_SS
2669#ifdef TARGET_X86_64
2670 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2671#endif
2672 )
2673 raise_exception_err(EXCP0D_GPF, 0);
2674 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2675 } else {
2676
2677 if (selector & 0x4)
2678 dt = &env->ldt;
2679 else
2680 dt = &env->gdt;
2681 index = selector & ~7;
2682 if ((index + 7) > dt->limit)
2683 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2684 ptr = dt->base + index;
2685 e1 = ldl_kernel(ptr);
2686 e2 = ldl_kernel(ptr + 4);
2687
2688 if (!(e2 & DESC_S_MASK))
2689 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2690 rpl = selector & 3;
2691 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2692 if (seg_reg == R_SS) {
2693 /* must be writable segment */
2694 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2695 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2696 if (rpl != cpl || dpl != cpl)
2697 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2698 } else {
2699 /* must be readable segment */
2700 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2701 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2702
2703 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2704 /* if not conforming code, test rights */
2705 if (dpl < cpl || dpl < rpl)
2706 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2707 }
2708 }
2709
2710 if (!(e2 & DESC_P_MASK)) {
2711 if (seg_reg == R_SS)
2712 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2713 else
2714 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2715 }
2716
2717 /* set the access bit if not already set */
2718 if (!(e2 & DESC_A_MASK)) {
2719 e2 |= DESC_A_MASK;
2720 stl_kernel(ptr + 4, e2);
2721 }
2722
2723 cpu_x86_load_seg_cache(env, seg_reg, selector,
2724 get_seg_base(e1, e2),
2725 get_seg_limit(e1, e2),
2726 e2);
2727#if 0
2728 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2729 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2730#endif
2731 }
2732}
2733
2734/* protected mode jump */
2735void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2736 int next_eip_addend)
2737{
2738 int gate_cs, type;
2739 uint32_t e1, e2, cpl, dpl, rpl, limit;
2740 target_ulong next_eip;
2741
2742#ifdef VBOX
2743 e1 = e2 = 0;
2744#endif
2745 if ((new_cs & 0xfffc) == 0)
2746 raise_exception_err(EXCP0D_GPF, 0);
2747 if (load_segment(&e1, &e2, new_cs) != 0)
2748 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2749 cpl = env->hflags & HF_CPL_MASK;
2750 if (e2 & DESC_S_MASK) {
2751 if (!(e2 & DESC_CS_MASK))
2752 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2753 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2754 if (e2 & DESC_C_MASK) {
2755 /* conforming code segment */
2756 if (dpl > cpl)
2757 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2758 } else {
2759 /* non conforming code segment */
2760 rpl = new_cs & 3;
2761 if (rpl > cpl)
2762 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2763 if (dpl != cpl)
2764 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2765 }
2766 if (!(e2 & DESC_P_MASK))
2767 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2768 limit = get_seg_limit(e1, e2);
2769 if (new_eip > limit &&
2770 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2771 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2772 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2773 get_seg_base(e1, e2), limit, e2);
2774 EIP = new_eip;
2775 } else {
2776 /* jump to call or task gate */
2777 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2778 rpl = new_cs & 3;
2779 cpl = env->hflags & HF_CPL_MASK;
2780 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2781 switch(type) {
2782 case 1: /* 286 TSS */
2783 case 9: /* 386 TSS */
2784 case 5: /* task gate */
2785 if (dpl < cpl || dpl < rpl)
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 next_eip = env->eip + next_eip_addend;
2788 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2789 CC_OP = CC_OP_EFLAGS;
2790 break;
2791 case 4: /* 286 call gate */
2792 case 12: /* 386 call gate */
2793 if ((dpl < cpl) || (dpl < rpl))
2794 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2795 if (!(e2 & DESC_P_MASK))
2796 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2797 gate_cs = e1 >> 16;
2798 new_eip = (e1 & 0xffff);
2799 if (type == 12)
2800 new_eip |= (e2 & 0xffff0000);
2801 if (load_segment(&e1, &e2, gate_cs) != 0)
2802 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2803 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2804 /* must be code segment */
2805 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2806 (DESC_S_MASK | DESC_CS_MASK)))
2807 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2808 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2809 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2810 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2811 if (!(e2 & DESC_P_MASK))
2812#ifdef VBOX /* See page 3-514 of 253666.pdf */
2813 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2814#else
2815 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2816#endif
2817 limit = get_seg_limit(e1, e2);
2818 if (new_eip > limit)
2819 raise_exception_err(EXCP0D_GPF, 0);
2820 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2821 get_seg_base(e1, e2), limit, e2);
2822 EIP = new_eip;
2823 break;
2824 default:
2825 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2826 break;
2827 }
2828 }
2829}
2830
2831/* real mode call */
2832void helper_lcall_real(int new_cs, target_ulong new_eip1,
2833 int shift, int next_eip)
2834{
2835 int new_eip;
2836 uint32_t esp, esp_mask;
2837 target_ulong ssp;
2838
2839 new_eip = new_eip1;
2840 esp = ESP;
2841 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2842 ssp = env->segs[R_SS].base;
2843 if (shift) {
2844 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2845 PUSHL(ssp, esp, esp_mask, next_eip);
2846 } else {
2847 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2848 PUSHW(ssp, esp, esp_mask, next_eip);
2849 }
2850
2851 SET_ESP(esp, esp_mask);
2852 env->eip = new_eip;
2853 env->segs[R_CS].selector = new_cs;
2854 env->segs[R_CS].base = (new_cs << 4);
2855}
2856
2857/* protected mode call */
2858void helper_lcall_protected(int new_cs, target_ulong new_eip,
2859 int shift, int next_eip_addend)
2860{
2861 int new_stack, i;
2862 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2863 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2864 uint32_t val, limit, old_sp_mask;
2865 target_ulong ssp, old_ssp, next_eip;
2866
2867#ifdef VBOX
2868 ss = ss_e1 = ss_e2 = e1 = e2 = 0;
2869#endif
2870 next_eip = env->eip + next_eip_addend;
2871#ifdef DEBUG_PCALL
2872 if (loglevel & CPU_LOG_PCALL) {
2873 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2874 new_cs, (uint32_t)new_eip, shift);
2875 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2876 }
2877#endif
2878 if ((new_cs & 0xfffc) == 0)
2879 raise_exception_err(EXCP0D_GPF, 0);
2880 if (load_segment(&e1, &e2, new_cs) != 0)
2881 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2882 cpl = env->hflags & HF_CPL_MASK;
2883#ifdef DEBUG_PCALL
2884 if (loglevel & CPU_LOG_PCALL) {
2885 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2886 }
2887#endif
2888 if (e2 & DESC_S_MASK) {
2889 if (!(e2 & DESC_CS_MASK))
2890 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2891 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2892 if (e2 & DESC_C_MASK) {
2893 /* conforming code segment */
2894 if (dpl > cpl)
2895 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2896 } else {
2897 /* non conforming code segment */
2898 rpl = new_cs & 3;
2899 if (rpl > cpl)
2900 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2901 if (dpl != cpl)
2902 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2903 }
2904 if (!(e2 & DESC_P_MASK))
2905 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2906
2907#ifdef TARGET_X86_64
2908 /* XXX: check 16/32 bit cases in long mode */
2909 if (shift == 2) {
2910 target_ulong rsp;
2911 /* 64 bit case */
2912 rsp = ESP;
2913 PUSHQ(rsp, env->segs[R_CS].selector);
2914 PUSHQ(rsp, next_eip);
2915 /* from this point, not restartable */
2916 ESP = rsp;
2917 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2918 get_seg_base(e1, e2),
2919 get_seg_limit(e1, e2), e2);
2920 EIP = new_eip;
2921 } else
2922#endif
2923 {
2924 sp = ESP;
2925 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2926 ssp = env->segs[R_SS].base;
2927 if (shift) {
2928 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2929 PUSHL(ssp, sp, sp_mask, next_eip);
2930 } else {
2931 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2932 PUSHW(ssp, sp, sp_mask, next_eip);
2933 }
2934
2935 limit = get_seg_limit(e1, e2);
2936 if (new_eip > limit)
2937 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2938 /* from this point, not restartable */
2939 SET_ESP(sp, sp_mask);
2940 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2941 get_seg_base(e1, e2), limit, e2);
2942 EIP = new_eip;
2943 }
2944 } else {
2945 /* check gate type */
2946 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2947 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2948 rpl = new_cs & 3;
2949 switch(type) {
2950 case 1: /* available 286 TSS */
2951 case 9: /* available 386 TSS */
2952 case 5: /* task gate */
2953 if (dpl < cpl || dpl < rpl)
2954 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2955 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2956 CC_OP = CC_OP_EFLAGS;
2957 return;
2958 case 4: /* 286 call gate */
2959 case 12: /* 386 call gate */
2960 break;
2961 default:
2962 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2963 break;
2964 }
2965 shift = type >> 3;
2966
2967 if (dpl < cpl || dpl < rpl)
2968 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2969 /* check valid bit */
2970 if (!(e2 & DESC_P_MASK))
2971 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2972 selector = e1 >> 16;
2973 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2974 param_count = e2 & 0x1f;
2975 if ((selector & 0xfffc) == 0)
2976 raise_exception_err(EXCP0D_GPF, 0);
2977
2978 if (load_segment(&e1, &e2, selector) != 0)
2979 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2980 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2981 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2982 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2983 if (dpl > cpl)
2984 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2985 if (!(e2 & DESC_P_MASK))
2986 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2987
2988 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2989 /* to inner privilege */
2990 get_ss_esp_from_tss(&ss, &sp, dpl);
2991#ifdef DEBUG_PCALL
2992 if (loglevel & CPU_LOG_PCALL)
2993 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2994 ss, sp, param_count, ESP);
2995#endif
2996 if ((ss & 0xfffc) == 0)
2997 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2998 if ((ss & 3) != dpl)
2999 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3000 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3001 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3002 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3003 if (ss_dpl != dpl)
3004 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3005 if (!(ss_e2 & DESC_S_MASK) ||
3006 (ss_e2 & DESC_CS_MASK) ||
3007 !(ss_e2 & DESC_W_MASK))
3008 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3009 if (!(ss_e2 & DESC_P_MASK))
3010#ifdef VBOX /* See page 3-99 of 253666.pdf */
3011 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3012#else
3013 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3014#endif
3015
3016 // push_size = ((param_count * 2) + 8) << shift;
3017
3018 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3019 old_ssp = env->segs[R_SS].base;
3020
3021 sp_mask = get_sp_mask(ss_e2);
3022 ssp = get_seg_base(ss_e1, ss_e2);
3023 if (shift) {
3024 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3025 PUSHL(ssp, sp, sp_mask, ESP);
3026 for(i = param_count - 1; i >= 0; i--) {
3027 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3028 PUSHL(ssp, sp, sp_mask, val);
3029 }
3030 } else {
3031 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3032 PUSHW(ssp, sp, sp_mask, ESP);
3033 for(i = param_count - 1; i >= 0; i--) {
3034 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3035 PUSHW(ssp, sp, sp_mask, val);
3036 }
3037 }
3038 new_stack = 1;
3039 } else {
3040 /* to same privilege */
3041 sp = ESP;
3042 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3043 ssp = env->segs[R_SS].base;
3044 // push_size = (4 << shift);
3045 new_stack = 0;
3046 }
3047
3048 if (shift) {
3049 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3050 PUSHL(ssp, sp, sp_mask, next_eip);
3051 } else {
3052 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3053 PUSHW(ssp, sp, sp_mask, next_eip);
3054 }
3055
3056 /* from this point, not restartable */
3057
3058 if (new_stack) {
3059 ss = (ss & ~3) | dpl;
3060 cpu_x86_load_seg_cache(env, R_SS, ss,
3061 ssp,
3062 get_seg_limit(ss_e1, ss_e2),
3063 ss_e2);
3064 }
3065
3066 selector = (selector & ~3) | dpl;
3067 cpu_x86_load_seg_cache(env, R_CS, selector,
3068 get_seg_base(e1, e2),
3069 get_seg_limit(e1, e2),
3070 e2);
3071 cpu_x86_set_cpl(env, dpl);
3072 SET_ESP(sp, sp_mask);
3073 EIP = offset;
3074 }
3075#ifdef USE_KQEMU
3076 if (kqemu_is_ok(env)) {
3077 env->exception_index = -1;
3078 cpu_loop_exit();
3079 }
3080#endif
3081}
3082
3083/* real and vm86 mode iret */
3084void helper_iret_real(int shift)
3085{
3086 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3087 target_ulong ssp;
3088 int eflags_mask;
3089#ifdef VBOX
3090 bool fVME = false;
3091
3092 remR3TrapClear(env->pVM);
3093#endif /* VBOX */
3094
3095 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3096 sp = ESP;
3097 ssp = env->segs[R_SS].base;
3098 if (shift == 1) {
3099 /* 32 bits */
3100 POPL(ssp, sp, sp_mask, new_eip);
3101 POPL(ssp, sp, sp_mask, new_cs);
3102 new_cs &= 0xffff;
3103 POPL(ssp, sp, sp_mask, new_eflags);
3104 } else {
3105 /* 16 bits */
3106 POPW(ssp, sp, sp_mask, new_eip);
3107 POPW(ssp, sp, sp_mask, new_cs);
3108 POPW(ssp, sp, sp_mask, new_eflags);
3109 }
3110#ifdef VBOX
3111 if ( (env->eflags & VM_MASK)
3112 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3113 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3114 {
3115 fVME = true;
3116 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3117 /* if TF will be set -> #GP */
3118 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3119 || (new_eflags & TF_MASK))
3120 raise_exception(EXCP0D_GPF);
3121 }
3122#endif /* VBOX */
3123 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3124 env->segs[R_CS].selector = new_cs;
3125 env->segs[R_CS].base = (new_cs << 4);
3126 env->eip = new_eip;
3127#ifdef VBOX
3128 if (fVME)
3129 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3130 else
3131#endif
3132 if (env->eflags & VM_MASK)
3133 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3134 else
3135 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3136 if (shift == 0)
3137 eflags_mask &= 0xffff;
3138 load_eflags(new_eflags, eflags_mask);
3139 env->hflags2 &= ~HF2_NMI_MASK;
3140#ifdef VBOX
3141 if (fVME)
3142 {
3143 if (new_eflags & IF_MASK)
3144 env->eflags |= VIF_MASK;
3145 else
3146 env->eflags &= ~VIF_MASK;
3147 }
3148#endif /* VBOX */
3149}
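/* Editor's note (not part of the original source): the VBOX-specific fVME
 * path above implements CR4.VME semantics for IRET in virtual-8086 mode with
 * IOPL < 3.  Instead of faulting, IF from the popped flags is reflected into
 * VIF, and #GP is raised only if the guest would set TF or would enable
 * interrupts while VIP is pending. */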
3150
3151#ifndef VBOX
3152static inline void validate_seg(int seg_reg, int cpl)
3153#else /* VBOX */
3154DECLINLINE(void) validate_seg(int seg_reg, int cpl)
3155#endif /* VBOX */
3156{
3157 int dpl;
3158 uint32_t e2;
3159
3160 /* XXX: on x86_64, we do not want to nullify FS and GS because
3161 they may still contain a valid base. I would be interested to
3162 know how a real x86_64 CPU behaves */
3163 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3164 (env->segs[seg_reg].selector & 0xfffc) == 0)
3165 return;
3166
3167 e2 = env->segs[seg_reg].flags;
3168 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3169 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3170 /* data or non conforming code segment */
3171 if (dpl < cpl) {
3172 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3173 }
3174 }
3175}
3176
3177/* protected mode iret */
3178#ifndef VBOX
3179static inline void helper_ret_protected(int shift, int is_iret, int addend)
3180#else /* VBOX */
3181DECLINLINE(void) helper_ret_protected(int shift, int is_iret, int addend)
3182#endif /* VBOX */
3183{
3184 uint32_t new_cs, new_eflags, new_ss;
3185 uint32_t new_es, new_ds, new_fs, new_gs;
3186 uint32_t e1, e2, ss_e1, ss_e2;
3187 int cpl, dpl, rpl, eflags_mask, iopl;
3188 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3189
3190#ifdef VBOX
3191 ss_e1 = ss_e2 = e1 = e2 = 0;
3192#endif
3193
3194#ifdef TARGET_X86_64
3195 if (shift == 2)
3196 sp_mask = -1;
3197 else
3198#endif
3199 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3200 sp = ESP;
3201 ssp = env->segs[R_SS].base;
3202 new_eflags = 0; /* avoid warning */
3203#ifdef TARGET_X86_64
3204 if (shift == 2) {
3205 POPQ(sp, new_eip);
3206 POPQ(sp, new_cs);
3207 new_cs &= 0xffff;
3208 if (is_iret) {
3209 POPQ(sp, new_eflags);
3210 }
3211 } else
3212#endif
3213 if (shift == 1) {
3214 /* 32 bits */
3215 POPL(ssp, sp, sp_mask, new_eip);
3216 POPL(ssp, sp, sp_mask, new_cs);
3217 new_cs &= 0xffff;
3218 if (is_iret) {
3219 POPL(ssp, sp, sp_mask, new_eflags);
3220#if defined(VBOX) && defined(DEBUG)
3221 printf("iret: new CS %04X\n", new_cs);
3222 printf("iret: new EIP %08X\n", (uint32_t)new_eip);
3223 printf("iret: new EFLAGS %08X\n", new_eflags);
3224 printf("iret: EAX=%08x\n", (uint32_t)EAX);
3225#endif
3226 if (new_eflags & VM_MASK)
3227 goto return_to_vm86;
3228 }
3229#ifdef VBOX
3230 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3231 {
3232#ifdef DEBUG
3233 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
3234#endif
3235 new_cs = new_cs & 0xfffc;
3236 }
3237#endif
3238 } else {
3239 /* 16 bits */
3240 POPW(ssp, sp, sp_mask, new_eip);
3241 POPW(ssp, sp, sp_mask, new_cs);
3242 if (is_iret)
3243 POPW(ssp, sp, sp_mask, new_eflags);
3244 }
3245#ifdef DEBUG_PCALL
3246 if (loglevel & CPU_LOG_PCALL) {
3247 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3248 new_cs, new_eip, shift, addend);
3249 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
3250 }
3251#endif
3252 if ((new_cs & 0xfffc) == 0)
3253 {
3254#if defined(VBOX) && defined(DEBUG)
3255 printf("(new_cs & 0xfffc) == 0\n");
3256#endif
3257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3258 }
3259 if (load_segment(&e1, &e2, new_cs) != 0)
3260 {
3261#if defined(VBOX) && defined(DEBUG)
3262 printf("load_segment failed\n");
3263#endif
3264 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3265 }
3266 if (!(e2 & DESC_S_MASK) ||
3267 !(e2 & DESC_CS_MASK))
3268 {
3269#if defined(VBOX) && defined(DEBUG)
3270 printf("e2 mask %08x\n", e2);
3271#endif
3272 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3273 }
3274 cpl = env->hflags & HF_CPL_MASK;
3275 rpl = new_cs & 3;
3276 if (rpl < cpl)
3277 {
3278#if defined(VBOX) && defined(DEBUG)
3279 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
3280#endif
3281 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3282 }
3283 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3284 if (e2 & DESC_C_MASK) {
3285 if (dpl > rpl)
3286 {
3287#if defined(VBOX) && defined(DEBUG)
3288 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
3289#endif
3290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3291 }
3292 } else {
3293 if (dpl != rpl)
3294 {
3295#if defined(VBOX) && defined(DEBUG)
3296 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
3297#endif
3298 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3299 }
3300 }
3301 if (!(e2 & DESC_P_MASK))
3302 {
3303#if defined(VBOX) && defined(DEBUG)
3304 printf("DESC_P_MASK e2=%08x\n", e2);
3305#endif
3306 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3307 }
3308
3309 sp += addend;
3310 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3311 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3312 /* return to same privilege level */
3313 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3314 get_seg_base(e1, e2),
3315 get_seg_limit(e1, e2),
3316 e2);
3317 } else {
3318 /* return to different privilege level */
3319#ifdef TARGET_X86_64
3320 if (shift == 2) {
3321 POPQ(sp, new_esp);
3322 POPQ(sp, new_ss);
3323 new_ss &= 0xffff;
3324 } else
3325#endif
3326 if (shift == 1) {
3327 /* 32 bits */
3328 POPL(ssp, sp, sp_mask, new_esp);
3329 POPL(ssp, sp, sp_mask, new_ss);
3330 new_ss &= 0xffff;
3331 } else {
3332 /* 16 bits */
3333 POPW(ssp, sp, sp_mask, new_esp);
3334 POPW(ssp, sp, sp_mask, new_ss);
3335 }
3336#ifdef DEBUG_PCALL
3337 if (loglevel & CPU_LOG_PCALL) {
3338 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
3339 new_ss, new_esp);
3340 }
3341#endif
3342 if ((new_ss & 0xfffc) == 0) {
3343#ifdef TARGET_X86_64
3344 /* NULL ss is allowed in long mode if cpl != 3*/
3345 /* XXX: test CS64 ? */
3346 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3347 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3348 0, 0xffffffff,
3349 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3350 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3351 DESC_W_MASK | DESC_A_MASK);
3352 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3353 } else
3354#endif
3355 {
3356 raise_exception_err(EXCP0D_GPF, 0);
3357 }
3358 } else {
3359 if ((new_ss & 3) != rpl)
3360 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3361 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3362 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3363 if (!(ss_e2 & DESC_S_MASK) ||
3364 (ss_e2 & DESC_CS_MASK) ||
3365 !(ss_e2 & DESC_W_MASK))
3366 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3367 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3368 if (dpl != rpl)
3369 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3370 if (!(ss_e2 & DESC_P_MASK))
3371 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3372 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3373 get_seg_base(ss_e1, ss_e2),
3374 get_seg_limit(ss_e1, ss_e2),
3375 ss_e2);
3376 }
3377
3378 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3379 get_seg_base(e1, e2),
3380 get_seg_limit(e1, e2),
3381 e2);
3382 cpu_x86_set_cpl(env, rpl);
3383 sp = new_esp;
3384#ifdef TARGET_X86_64
3385 if (env->hflags & HF_CS64_MASK)
3386 sp_mask = -1;
3387 else
3388#endif
3389 sp_mask = get_sp_mask(ss_e2);
3390
3391 /* validate data segments */
3392 validate_seg(R_ES, rpl);
3393 validate_seg(R_DS, rpl);
3394 validate_seg(R_FS, rpl);
3395 validate_seg(R_GS, rpl);
3396
3397 sp += addend;
3398 }
3399 SET_ESP(sp, sp_mask);
3400 env->eip = new_eip;
3401 if (is_iret) {
3402 /* NOTE: 'cpl' is the _old_ CPL */
3403 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3404 if (cpl == 0)
3405#ifdef VBOX
3406 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3407#else
3408 eflags_mask |= IOPL_MASK;
3409#endif
3410 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3411 if (cpl <= iopl)
3412 eflags_mask |= IF_MASK;
3413 if (shift == 0)
3414 eflags_mask &= 0xffff;
3415 load_eflags(new_eflags, eflags_mask);
3416 }
3417 return;
3418
3419 return_to_vm86:
3420 POPL(ssp, sp, sp_mask, new_esp);
3421 POPL(ssp, sp, sp_mask, new_ss);
3422 POPL(ssp, sp, sp_mask, new_es);
3423 POPL(ssp, sp, sp_mask, new_ds);
3424 POPL(ssp, sp, sp_mask, new_fs);
3425 POPL(ssp, sp, sp_mask, new_gs);
3426
3427 /* modify processor state */
3428 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3429 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3430 load_seg_vm(R_CS, new_cs & 0xffff);
3431 cpu_x86_set_cpl(env, 3);
3432 load_seg_vm(R_SS, new_ss & 0xffff);
3433 load_seg_vm(R_ES, new_es & 0xffff);
3434 load_seg_vm(R_DS, new_ds & 0xffff);
3435 load_seg_vm(R_FS, new_fs & 0xffff);
3436 load_seg_vm(R_GS, new_gs & 0xffff);
3437
3438 env->eip = new_eip & 0xffff;
3439 ESP = new_esp;
3440}
3441
3442void helper_iret_protected(int shift, int next_eip)
3443{
3444 int tss_selector, type;
3445 uint32_t e1, e2;
3446
3447#ifdef VBOX
3448 e1 = e2 = 0;
3449 remR3TrapClear(env->pVM);
3450#endif
3451
3452 /* specific case for TSS */
3453 if (env->eflags & NT_MASK) {
3454#ifdef TARGET_X86_64
3455 if (env->hflags & HF_LMA_MASK)
3456 raise_exception_err(EXCP0D_GPF, 0);
3457#endif
3458 tss_selector = lduw_kernel(env->tr.base + 0);
3459 if (tss_selector & 4)
3460 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3461 if (load_segment(&e1, &e2, tss_selector) != 0)
3462 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3463 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3464 /* NOTE: we check both segment and busy TSS */
3465 if (type != 3)
3466 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3467 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3468 } else {
3469 helper_ret_protected(shift, 1, 0);
3470 }
3471 env->hflags2 &= ~HF2_NMI_MASK;
3472#ifdef USE_KQEMU
3473 if (kqemu_is_ok(env)) {
3474 CC_OP = CC_OP_EFLAGS;
3475 env->exception_index = -1;
3476 cpu_loop_exit();
3477 }
3478#endif
3479}
3480
3481void helper_lret_protected(int shift, int addend)
3482{
3483 helper_ret_protected(shift, 0, addend);
3484#ifdef USE_KQEMU
3485 if (kqemu_is_ok(env)) {
3486 env->exception_index = -1;
3487 cpu_loop_exit();
3488 }
3489#endif
3490}
3491
3492void helper_sysenter(void)
3493{
3494 if (env->sysenter_cs == 0) {
3495 raise_exception_err(EXCP0D_GPF, 0);
3496 }
3497 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3498 cpu_x86_set_cpl(env, 0);
3499
3500#ifdef TARGET_X86_64
3501 if (env->hflags & HF_LMA_MASK) {
3502 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3503 0, 0xffffffff,
3504 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3505 DESC_S_MASK |
3506 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3507 } else
3508#endif
3509 {
3510 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3511 0, 0xffffffff,
3512 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3513 DESC_S_MASK |
3514 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3515 }
3516 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3517 0, 0xffffffff,
3518 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3519 DESC_S_MASK |
3520 DESC_W_MASK | DESC_A_MASK);
3521 ESP = env->sysenter_esp;
3522 EIP = env->sysenter_eip;
3523}
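/* Editor's note (summary, not part of the original source): SYSENTER derives
 * flat ring-0 segments from the IA32_SYSENTER_CS MSR (CS = MSR value,
 * SS = MSR + 8), clears VM/IF/RF and continues at SYSENTER_EIP:SYSENTER_ESP
 * at CPL 0.  helper_sysexit below does the inverse, returning to CPL 3 with
 * CS = MSR + 16 and SS = MSR + 24 (or + 32 / + 40 for the 64-bit variant)
 * and taking the target EIP/ESP from EDX and ECX. */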
3524
3525void helper_sysexit(int dflag)
3526{
3527 int cpl;
3528
3529 cpl = env->hflags & HF_CPL_MASK;
3530 if (env->sysenter_cs == 0 || cpl != 0) {
3531 raise_exception_err(EXCP0D_GPF, 0);
3532 }
3533 cpu_x86_set_cpl(env, 3);
3534#ifdef TARGET_X86_64
3535 if (dflag == 2) {
3536 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3537 0, 0xffffffff,
3538 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3539 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3540 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3541 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3542 0, 0xffffffff,
3543 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3544 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3545 DESC_W_MASK | DESC_A_MASK);
3546 } else
3547#endif
3548 {
3549 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3550 0, 0xffffffff,
3551 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3552 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3553 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3554 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3555 0, 0xffffffff,
3556 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3557 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3558 DESC_W_MASK | DESC_A_MASK);
3559 }
3560 ESP = ECX;
3561 EIP = EDX;
3562#ifdef USE_KQEMU
3563 if (kqemu_is_ok(env)) {
3564 env->exception_index = -1;
3565 cpu_loop_exit();
3566 }
3567#endif
3568}
3569
3570#if defined(CONFIG_USER_ONLY)
3571target_ulong helper_read_crN(int reg)
3572{
3573 return 0;
3574}
3575
3576void helper_write_crN(int reg, target_ulong t0)
3577{
3578}
3579#else
3580target_ulong helper_read_crN(int reg)
3581{
3582 target_ulong val;
3583
3584 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3585 switch(reg) {
3586 default:
3587 val = env->cr[reg];
3588 break;
3589 case 8:
3590 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3591 val = cpu_get_apic_tpr(env);
3592 } else {
3593 val = env->v_tpr;
3594 }
3595 break;
3596 }
3597 return val;
3598}
3599
3600void helper_write_crN(int reg, target_ulong t0)
3601{
3602 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3603 switch(reg) {
3604 case 0:
3605 cpu_x86_update_cr0(env, t0);
3606 break;
3607 case 3:
3608 cpu_x86_update_cr3(env, t0);
3609 break;
3610 case 4:
3611 cpu_x86_update_cr4(env, t0);
3612 break;
3613 case 8:
3614 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3615 cpu_set_apic_tpr(env, t0);
3616 }
3617 env->v_tpr = t0 & 0x0f;
3618 break;
3619 default:
3620 env->cr[reg] = t0;
3621 break;
3622 }
3623}
3624#endif
3625
3626void helper_lmsw(target_ulong t0)
3627{
3628 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3629 if already set to one. */
3630 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3631 helper_write_crN(0, t0);
3632}
3633
3634void helper_clts(void)
3635{
3636 env->cr[0] &= ~CR0_TS_MASK;
3637 env->hflags &= ~HF_TS_MASK;
3638}
3639
3640/* XXX: do more */
3641void helper_movl_drN_T0(int reg, target_ulong t0)
3642{
3643 env->dr[reg] = t0;
3644}
3645
3646void helper_invlpg(target_ulong addr)
3647{
3648 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3649 tlb_flush_page(env, addr);
3650}
3651
3652void helper_rdtsc(void)
3653{
3654 uint64_t val;
3655
3656 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3657 raise_exception(EXCP0D_GPF);
3658 }
3659 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3660
3661 val = cpu_get_tsc(env) + env->tsc_offset;
3662 EAX = (uint32_t)(val);
3663 EDX = (uint32_t)(val >> 32);
3664}
3665
3666#ifdef VBOX
3667void helper_rdtscp(void)
3668{
3669 uint64_t val;
3670 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3671 raise_exception(EXCP0D_GPF);
3672 }
3673
3674 val = cpu_get_tsc(env);
3675 EAX = (uint32_t)(val);
3676 EDX = (uint32_t)(val >> 32);
3677 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3678}
3679#endif
3680
3681void helper_rdpmc(void)
3682{
3683 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3684 raise_exception(EXCP0D_GPF);
3685 }
3686 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3687
3688 /* currently unimplemented */
3689 raise_exception_err(EXCP06_ILLOP, 0);
3690}
3691
3692#if defined(CONFIG_USER_ONLY)
3693void helper_wrmsr(void)
3694{
3695}
3696
3697void helper_rdmsr(void)
3698{
3699}
3700#else
3701void helper_wrmsr(void)
3702{
3703 uint64_t val;
3704
3705 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3706
3707 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3708
3709 switch((uint32_t)ECX) {
3710 case MSR_IA32_SYSENTER_CS:
3711 env->sysenter_cs = val & 0xffff;
3712 break;
3713 case MSR_IA32_SYSENTER_ESP:
3714 env->sysenter_esp = val;
3715 break;
3716 case MSR_IA32_SYSENTER_EIP:
3717 env->sysenter_eip = val;
3718 break;
3719 case MSR_IA32_APICBASE:
3720 cpu_set_apic_base(env, val);
3721 break;
3722 case MSR_EFER:
3723 {
3724 uint64_t update_mask;
3725 update_mask = 0;
3726 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3727 update_mask |= MSR_EFER_SCE;
3728 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3729 update_mask |= MSR_EFER_LME;
3730 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3731 update_mask |= MSR_EFER_FFXSR;
3732 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3733 update_mask |= MSR_EFER_NXE;
3734 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3735 update_mask |= MSR_EFER_SVME;
3736 cpu_load_efer(env, (env->efer & ~update_mask) |
3737 (val & update_mask));
3738 }
3739 break;
3740 case MSR_STAR:
3741 env->star = val;
3742 break;
3743 case MSR_PAT:
3744 env->pat = val;
3745 break;
3746 case MSR_VM_HSAVE_PA:
3747 env->vm_hsave = val;
3748 break;
3749#ifdef TARGET_X86_64
3750 case MSR_LSTAR:
3751 env->lstar = val;
3752 break;
3753 case MSR_CSTAR:
3754 env->cstar = val;
3755 break;
3756 case MSR_FMASK:
3757 env->fmask = val;
3758 break;
3759 case MSR_FSBASE:
3760 env->segs[R_FS].base = val;
3761 break;
3762 case MSR_GSBASE:
3763 env->segs[R_GS].base = val;
3764 break;
3765 case MSR_KERNELGSBASE:
3766 env->kernelgsbase = val;
3767 break;
3768#endif
3769 default:
3770#ifndef VBOX
3771 /* XXX: exception ? */
3772 break;
3773#else /* VBOX */
3774 {
3775 uint32_t ecx = (uint32_t)ECX;
3776 /* In the X2APIC specification this range is reserved for APIC control. */
3777 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3778 cpu_apic_wrmsr(env, ecx, val);
3779 /** @todo else exception? */
3780 break;
3781 }
3782 case MSR_K8_TSC_AUX:
3783 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3784 break;
3785#endif /* VBOX */
3786 }
3787}
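/* Editor's note (not part of the original source): the MSR_EFER case above
 * shows how EFER writes are sanitised.  Only bits whose backing CPUID
 * features are advertised (SCE, LME, FFXSR, NXE, SVME) are taken from the
 * guest value; all other bits keep their previous setting.  The VBOX-only
 * default branch forwards writes in the x2APIC MSR range to the APIC
 * emulation instead of silently dropping them. */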
3788
3789void helper_rdmsr(void)
3790{
3791 uint64_t val;
3792
3793 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3794
3795 switch((uint32_t)ECX) {
3796 case MSR_IA32_SYSENTER_CS:
3797 val = env->sysenter_cs;
3798 break;
3799 case MSR_IA32_SYSENTER_ESP:
3800 val = env->sysenter_esp;
3801 break;
3802 case MSR_IA32_SYSENTER_EIP:
3803 val = env->sysenter_eip;
3804 break;
3805 case MSR_IA32_APICBASE:
3806 val = cpu_get_apic_base(env);
3807 break;
3808 case MSR_EFER:
3809 val = env->efer;
3810 break;
3811 case MSR_STAR:
3812 val = env->star;
3813 break;
3814 case MSR_PAT:
3815 val = env->pat;
3816 break;
3817 case MSR_VM_HSAVE_PA:
3818 val = env->vm_hsave;
3819 break;
3820 case MSR_IA32_PERF_STATUS:
3821 /* tsc_increment_by_tick */
3822 val = 1000ULL;
3823 /* CPU multiplier */
3824 val |= (((uint64_t)4ULL) << 40);
3825 break;
3826#ifdef TARGET_X86_64
3827 case MSR_LSTAR:
3828 val = env->lstar;
3829 break;
3830 case MSR_CSTAR:
3831 val = env->cstar;
3832 break;
3833 case MSR_FMASK:
3834 val = env->fmask;
3835 break;
3836 case MSR_FSBASE:
3837 val = env->segs[R_FS].base;
3838 break;
3839 case MSR_GSBASE:
3840 val = env->segs[R_GS].base;
3841 break;
3842 case MSR_KERNELGSBASE:
3843 val = env->kernelgsbase;
3844 break;
3845#endif
3846#ifdef USE_KQEMU
3847 case MSR_QPI_COMMBASE:
3848 if (env->kqemu_enabled) {
3849 val = kqemu_comm_base;
3850 } else {
3851 val = 0;
3852 }
3853 break;
3854#endif
3855 default:
3856#ifndef VBOX
3857 /* XXX: exception ? */
3858 val = 0;
3859 break;
3860#else /* VBOX */
3861 {
3862 uint32_t ecx = (uint32_t)ECX;
3863 /* In the X2APIC specification this range is reserved for APIC control. */
3864 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3865 val = cpu_apic_rdmsr(env, ecx);
3866 else
3867 val = 0; /** @todo else exception? */
3868 break;
3869 }
3870 case MSR_K8_TSC_AUX:
3871 val = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3872 break;
3873#endif /* VBOX */
3874 }
3875 EAX = (uint32_t)(val);
3876 EDX = (uint32_t)(val >> 32);
3877}
3878#endif
3879
3880target_ulong helper_lsl(target_ulong selector1)
3881{
3882 unsigned int limit;
3883 uint32_t e1, e2, eflags, selector;
3884 int rpl, dpl, cpl, type;
3885
3886 selector = selector1 & 0xffff;
3887 eflags = cc_table[CC_OP].compute_all();
3888 if (load_segment(&e1, &e2, selector) != 0)
3889 goto fail;
3890 rpl = selector & 3;
3891 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3892 cpl = env->hflags & HF_CPL_MASK;
3893 if (e2 & DESC_S_MASK) {
3894 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3895 /* conforming */
3896 } else {
3897 if (dpl < cpl || dpl < rpl)
3898 goto fail;
3899 }
3900 } else {
3901 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3902 switch(type) {
3903 case 1:
3904 case 2:
3905 case 3:
3906 case 9:
3907 case 11:
3908 break;
3909 default:
3910 goto fail;
3911 }
3912 if (dpl < cpl || dpl < rpl) {
3913 fail:
3914 CC_SRC = eflags & ~CC_Z;
3915 return 0;
3916 }
3917 }
3918 limit = get_seg_limit(e1, e2);
3919 CC_SRC = eflags | CC_Z;
3920 return limit;
3921}
3922
3923target_ulong helper_lar(target_ulong selector1)
3924{
3925 uint32_t e1, e2, eflags, selector;
3926 int rpl, dpl, cpl, type;
3927
3928 selector = selector1 & 0xffff;
3929 eflags = cc_table[CC_OP].compute_all();
3930 if ((selector & 0xfffc) == 0)
3931 goto fail;
3932 if (load_segment(&e1, &e2, selector) != 0)
3933 goto fail;
3934 rpl = selector & 3;
3935 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3936 cpl = env->hflags & HF_CPL_MASK;
3937 if (e2 & DESC_S_MASK) {
3938 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3939 /* conforming */
3940 } else {
3941 if (dpl < cpl || dpl < rpl)
3942 goto fail;
3943 }
3944 } else {
3945 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3946 switch(type) {
3947 case 1:
3948 case 2:
3949 case 3:
3950 case 4:
3951 case 5:
3952 case 9:
3953 case 11:
3954 case 12:
3955 break;
3956 default:
3957 goto fail;
3958 }
3959 if (dpl < cpl || dpl < rpl) {
3960 fail:
3961 CC_SRC = eflags & ~CC_Z;
3962 return 0;
3963 }
3964 }
3965 CC_SRC = eflags | CC_Z;
3966 return e2 & 0x00f0ff00;
3967}
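/* Editor's note (summary, not part of the original source): helper_lsl and
 * helper_lar report success through ZF (via CC_SRC) rather than by faulting.
 * Both clear ZF and return 0 for selectors that do not reference a usable
 * descriptor (null, outside the descriptor table, privilege-protected, or of
 * a type the instruction does not accept); on success LSL returns the
 * expanded segment limit and LAR returns the access-rights dword masked with
 * 0x00f0ff00. */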
3968
3969void helper_verr(target_ulong selector1)
3970{
3971 uint32_t e1, e2, eflags, selector;
3972 int rpl, dpl, cpl;
3973
3974 selector = selector1 & 0xffff;
3975 eflags = cc_table[CC_OP].compute_all();
3976 if ((selector & 0xfffc) == 0)
3977 goto fail;
3978 if (load_segment(&e1, &e2, selector) != 0)
3979 goto fail;
3980 if (!(e2 & DESC_S_MASK))
3981 goto fail;
3982 rpl = selector & 3;
3983 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3984 cpl = env->hflags & HF_CPL_MASK;
3985 if (e2 & DESC_CS_MASK) {
3986 if (!(e2 & DESC_R_MASK))
3987 goto fail;
3988 if (!(e2 & DESC_C_MASK)) {
3989 if (dpl < cpl || dpl < rpl)
3990 goto fail;
3991 }
3992 } else {
3993 if (dpl < cpl || dpl < rpl) {
3994 fail:
3995 CC_SRC = eflags & ~CC_Z;
3996 return;
3997 }
3998 }
3999 CC_SRC = eflags | CC_Z;
4000}
4001
4002void helper_verw(target_ulong selector1)
4003{
4004 uint32_t e1, e2, eflags, selector;
4005 int rpl, dpl, cpl;
4006
4007 selector = selector1 & 0xffff;
4008 eflags = cc_table[CC_OP].compute_all();
4009 if ((selector & 0xfffc) == 0)
4010 goto fail;
4011 if (load_segment(&e1, &e2, selector) != 0)
4012 goto fail;
4013 if (!(e2 & DESC_S_MASK))
4014 goto fail;
4015 rpl = selector & 3;
4016 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4017 cpl = env->hflags & HF_CPL_MASK;
4018 if (e2 & DESC_CS_MASK) {
4019 goto fail;
4020 } else {
4021 if (dpl < cpl || dpl < rpl)
4022 goto fail;
4023 if (!(e2 & DESC_W_MASK)) {
4024 fail:
4025 CC_SRC = eflags & ~CC_Z;
4026 return;
4027 }
4028 }
4029 CC_SRC = eflags | CC_Z;
4030}
4031
4032/* x87 FPU helpers */
4033
4034static void fpu_set_exception(int mask)
4035{
4036 env->fpus |= mask;
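 /* if any flagged exception is unmasked in the control word, also set
 the error summary (ES) and busy (B) bits in the status word */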
4037 if (env->fpus & (~env->fpuc & FPUC_EM))
4038 env->fpus |= FPUS_SE | FPUS_B;
4039}
4040
4041#ifndef VBOX
4042static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4043#else /* VBOX */
4044DECLINLINE(CPU86_LDouble) helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4045#endif /* VBOX */
4046{
4047 if (b == 0.0)
4048 fpu_set_exception(FPUS_ZE);
4049 return a / b;
4050}
4051
4052void fpu_raise_exception(void)
4053{
4054 if (env->cr[0] & CR0_NE_MASK) {
4055 raise_exception(EXCP10_COPR);
4056 }
4057#if !defined(CONFIG_USER_ONLY)
4058 else {
4059 cpu_set_ferr(env);
4060 }
4061#endif
4062}
4063
4064void helper_flds_FT0(uint32_t val)
4065{
4066 union {
4067 float32 f;
4068 uint32_t i;
4069 } u;
4070 u.i = val;
4071 FT0 = float32_to_floatx(u.f, &env->fp_status);
4072}
4073
4074void helper_fldl_FT0(uint64_t val)
4075{
4076 union {
4077 float64 f;
4078 uint64_t i;
4079 } u;
4080 u.i = val;
4081 FT0 = float64_to_floatx(u.f, &env->fp_status);
4082}
4083
4084void helper_fildl_FT0(int32_t val)
4085{
4086 FT0 = int32_to_floatx(val, &env->fp_status);
4087}
4088
4089void helper_flds_ST0(uint32_t val)
4090{
4091 int new_fpstt;
4092 union {
4093 float32 f;
4094 uint32_t i;
4095 } u;
4096 new_fpstt = (env->fpstt - 1) & 7;
4097 u.i = val;
4098 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4099 env->fpstt = new_fpstt;
4100 env->fptags[new_fpstt] = 0; /* validate stack entry */
4101}
4102
4103void helper_fldl_ST0(uint64_t val)
4104{
4105 int new_fpstt;
4106 union {
4107 float64 f;
4108 uint64_t i;
4109 } u;
4110 new_fpstt = (env->fpstt - 1) & 7;
4111 u.i = val;
4112 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4113 env->fpstt = new_fpstt;
4114 env->fptags[new_fpstt] = 0; /* validate stack entry */
4115}
4116
4117void helper_fildl_ST0(int32_t val)
4118{
4119 int new_fpstt;
4120 new_fpstt = (env->fpstt - 1) & 7;
4121 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4122 env->fpstt = new_fpstt;
4123 env->fptags[new_fpstt] = 0; /* validate stack entry */
4124}
4125
4126void helper_fildll_ST0(int64_t val)
4127{
4128 int new_fpstt;
4129 new_fpstt = (env->fpstt - 1) & 7;
4130 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4131 env->fpstt = new_fpstt;
4132 env->fptags[new_fpstt] = 0; /* validate stack entry */
4133}
4134
4135#ifndef VBOX
4136uint32_t helper_fsts_ST0(void)
4137#else
4138RTCCUINTREG helper_fsts_ST0(void)
4139#endif
4140{
4141 union {
4142 float32 f;
4143 uint32_t i;
4144 } u;
4145 u.f = floatx_to_float32(ST0, &env->fp_status);
4146 return u.i;
4147}
4148
4149uint64_t helper_fstl_ST0(void)
4150{
4151 union {
4152 float64 f;
4153 uint64_t i;
4154 } u;
4155 u.f = floatx_to_float64(ST0, &env->fp_status);
4156 return u.i;
4157}
4158#ifndef VBOX
4159int32_t helper_fist_ST0(void)
4160#else
4161RTCCINTREG helper_fist_ST0(void)
4162#endif
4163{
4164 int32_t val;
4165 val = floatx_to_int32(ST0, &env->fp_status);
4166 if (val != (int16_t)val)
4167 val = -32768;
4168 return val;
4169}
4170
4171#ifndef VBOX
4172int32_t helper_fistl_ST0(void)
4173#else
4174RTCCINTREG helper_fistl_ST0(void)
4175#endif
4176{
4177 int32_t val;
4178 val = floatx_to_int32(ST0, &env->fp_status);
4179 return val;
4180}
4181
4182int64_t helper_fistll_ST0(void)
4183{
4184 int64_t val;
4185 val = floatx_to_int64(ST0, &env->fp_status);
4186 return val;
4187}
4188
4189#ifndef VBOX
4190int32_t helper_fistt_ST0(void)
4191#else
4192RTCCINTREG helper_fistt_ST0(void)
4193#endif
4194{
4195 int32_t val;
4196 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4197 if (val != (int16_t)val)
4198 val = -32768;
4199 return val;
4200}
4201
4202#ifndef VBOX
4203int32_t helper_fisttl_ST0(void)
4204#else
4205RTCCINTREG helper_fisttl_ST0(void)
4206#endif
4207{
4208 int32_t val;
4209 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4210 return val;
4211}
4212
4213int64_t helper_fisttll_ST0(void)
4214{
4215 int64_t val;
4216 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4217 return val;
4218}
4219
4220void helper_fldt_ST0(target_ulong ptr)
4221{
4222 int new_fpstt;
4223 new_fpstt = (env->fpstt - 1) & 7;
4224 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4225 env->fpstt = new_fpstt;
4226 env->fptags[new_fpstt] = 0; /* validate stack entry */
4227}
4228
4229void helper_fstt_ST0(target_ulong ptr)
4230{
4231 helper_fstt(ST0, ptr);
4232}
4233
4234void helper_fpush(void)
4235{
4236 fpush();
4237}
4238
4239void helper_fpop(void)
4240{
4241 fpop();
4242}
4243
4244void helper_fdecstp(void)
4245{
4246 env->fpstt = (env->fpstt - 1) & 7;
4247 env->fpus &= (~0x4700);
4248}
4249
4250void helper_fincstp(void)
4251{
4252 env->fpstt = (env->fpstt + 1) & 7;
4253 env->fpus &= (~0x4700);
4254}
4255
4256/* FPU move */
4257
4258void helper_ffree_STN(int st_index)
4259{
4260 env->fptags[(env->fpstt + st_index) & 7] = 1;
4261}
4262
4263void helper_fmov_ST0_FT0(void)
4264{
4265 ST0 = FT0;
4266}
4267
4268void helper_fmov_FT0_STN(int st_index)
4269{
4270 FT0 = ST(st_index);
4271}
4272
4273void helper_fmov_ST0_STN(int st_index)
4274{
4275 ST0 = ST(st_index);
4276}
4277
4278void helper_fmov_STN_ST0(int st_index)
4279{
4280 ST(st_index) = ST0;
4281}
4282
4283void helper_fxchg_ST0_STN(int st_index)
4284{
4285 CPU86_LDouble tmp;
4286 tmp = ST(st_index);
4287 ST(st_index) = ST0;
4288 ST0 = tmp;
4289}
4290
4291/* FPU operations */
4292
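/* FPU condition code bits (C3,C2,C0) stored by FCOM/FUCOM, indexed by the
   floatx_compare() result + 1: less, equal, greater, unordered. */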
4293static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4294
4295void helper_fcom_ST0_FT0(void)
4296{
4297 int ret;
4298
4299 ret = floatx_compare(ST0, FT0, &env->fp_status);
4300 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4301 FORCE_RET();
4302}
4303
4304void helper_fucom_ST0_FT0(void)
4305{
4306 int ret;
4307
4308 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4309 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4310 FORCE_RET();
4311}
4312
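/* EFLAGS bits stored by FCOMI/FUCOMI for the same four outcomes:
   less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */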
4313static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4314
4315void helper_fcomi_ST0_FT0(void)
4316{
4317 int eflags;
4318 int ret;
4319
4320 ret = floatx_compare(ST0, FT0, &env->fp_status);
4321 eflags = cc_table[CC_OP].compute_all();
4322 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4323 CC_SRC = eflags;
4324 FORCE_RET();
4325}
4326
4327void helper_fucomi_ST0_FT0(void)
4328{
4329 int eflags;
4330 int ret;
4331
4332 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4333 eflags = cc_table[CC_OP].compute_all();
4334 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4335 CC_SRC = eflags;
4336 FORCE_RET();
4337}
4338
4339void helper_fadd_ST0_FT0(void)
4340{
4341 ST0 += FT0;
4342}
4343
4344void helper_fmul_ST0_FT0(void)
4345{
4346 ST0 *= FT0;
4347}
4348
4349void helper_fsub_ST0_FT0(void)
4350{
4351 ST0 -= FT0;
4352}
4353
4354void helper_fsubr_ST0_FT0(void)
4355{
4356 ST0 = FT0 - ST0;
4357}
4358
4359void helper_fdiv_ST0_FT0(void)
4360{
4361 ST0 = helper_fdiv(ST0, FT0);
4362}
4363
4364void helper_fdivr_ST0_FT0(void)
4365{
4366 ST0 = helper_fdiv(FT0, ST0);
4367}
4368
4369/* fp operations between STN and ST0 */
4370
4371void helper_fadd_STN_ST0(int st_index)
4372{
4373 ST(st_index) += ST0;
4374}
4375
4376void helper_fmul_STN_ST0(int st_index)
4377{
4378 ST(st_index) *= ST0;
4379}
4380
4381void helper_fsub_STN_ST0(int st_index)
4382{
4383 ST(st_index) -= ST0;
4384}
4385
4386void helper_fsubr_STN_ST0(int st_index)
4387{
4388 CPU86_LDouble *p;
4389 p = &ST(st_index);
4390 *p = ST0 - *p;
4391}
4392
4393void helper_fdiv_STN_ST0(int st_index)
4394{
4395 CPU86_LDouble *p;
4396 p = &ST(st_index);
4397 *p = helper_fdiv(*p, ST0);
4398}
4399
4400void helper_fdivr_STN_ST0(int st_index)
4401{
4402 CPU86_LDouble *p;
4403 p = &ST(st_index);
4404 *p = helper_fdiv(ST0, *p);
4405}
4406
4407/* misc FPU operations */
4408void helper_fchs_ST0(void)
4409{
4410 ST0 = floatx_chs(ST0);
4411}
4412
4413void helper_fabs_ST0(void)
4414{
4415 ST0 = floatx_abs(ST0);
4416}
4417
4418void helper_fld1_ST0(void)
4419{
4420 ST0 = f15rk[1];
4421}
4422
4423void helper_fldl2t_ST0(void)
4424{
4425 ST0 = f15rk[6];
4426}
4427
4428void helper_fldl2e_ST0(void)
4429{
4430 ST0 = f15rk[5];
4431}
4432
4433void helper_fldpi_ST0(void)
4434{
4435 ST0 = f15rk[2];
4436}
4437
4438void helper_fldlg2_ST0(void)
4439{
4440 ST0 = f15rk[3];
4441}
4442
4443void helper_fldln2_ST0(void)
4444{
4445 ST0 = f15rk[4];
4446}
4447
4448void helper_fldz_ST0(void)
4449{
4450 ST0 = f15rk[0];
4451}
4452
4453void helper_fldz_FT0(void)
4454{
4455 FT0 = f15rk[0];
4456}
4457
4458#ifndef VBOX
4459uint32_t helper_fnstsw(void)
4460#else
4461RTCCUINTREG helper_fnstsw(void)
4462#endif
4463{
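 /* merge the top-of-stack pointer (fpstt) into bits 11..13 of the status word */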
4464 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4465}
4466
4467#ifndef VBOX
4468uint32_t helper_fnstcw(void)
4469#else
4470RTCCUINTREG helper_fnstcw(void)
4471#endif
4472{
4473 return env->fpuc;
4474}
4475
4476static void update_fp_status(void)
4477{
4478 int rnd_type;
4479
4480 /* set rounding mode */
4481 switch(env->fpuc & RC_MASK) {
4482 default:
4483 case RC_NEAR:
4484 rnd_type = float_round_nearest_even;
4485 break;
4486 case RC_DOWN:
4487 rnd_type = float_round_down;
4488 break;
4489 case RC_UP:
4490 rnd_type = float_round_up;
4491 break;
4492 case RC_CHOP:
4493 rnd_type = float_round_to_zero;
4494 break;
4495 }
4496 set_float_rounding_mode(rnd_type, &env->fp_status);
4497#ifdef FLOATX80
4498 switch((env->fpuc >> 8) & 3) {
4499 case 0:
4500 rnd_type = 32;
4501 break;
4502 case 2:
4503 rnd_type = 64;
4504 break;
4505 case 3:
4506 default:
4507 rnd_type = 80;
4508 break;
4509 }
4510 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4511#endif
4512}
4513
4514void helper_fldcw(uint32_t val)
4515{
4516 env->fpuc = val;
4517 update_fp_status();
4518}
4519
4520void helper_fclex(void)
4521{
4522 env->fpus &= 0x7f00;
4523}
4524
4525void helper_fwait(void)
4526{
4527 if (env->fpus & FPUS_SE)
4528 fpu_raise_exception();
4529 FORCE_RET();
4530}
4531
4532void helper_fninit(void)
4533{
4534 env->fpus = 0;
4535 env->fpstt = 0;
4536 env->fpuc = 0x37f;
4537 env->fptags[0] = 1;
4538 env->fptags[1] = 1;
4539 env->fptags[2] = 1;
4540 env->fptags[3] = 1;
4541 env->fptags[4] = 1;
4542 env->fptags[5] = 1;
4543 env->fptags[6] = 1;
4544 env->fptags[7] = 1;
4545}
4546
4547/* BCD ops */
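/* FBLD/FBSTP use a 10-byte packed BCD operand: bytes 0..8 hold 18 decimal
   digits (two per byte, low nibble first), bit 7 of byte 9 is the sign. */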
4548
4549void helper_fbld_ST0(target_ulong ptr)
4550{
4551 CPU86_LDouble tmp;
4552 uint64_t val;
4553 unsigned int v;
4554 int i;
4555
4556 val = 0;
4557 for(i = 8; i >= 0; i--) {
4558 v = ldub(ptr + i);
4559 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4560 }
4561 tmp = val;
4562 if (ldub(ptr + 9) & 0x80)
4563 tmp = -tmp;
4564 fpush();
4565 ST0 = tmp;
4566}
4567
4568void helper_fbst_ST0(target_ulong ptr)
4569{
4570 int v;
4571 target_ulong mem_ref, mem_end;
4572 int64_t val;
4573
4574 val = floatx_to_int64(ST0, &env->fp_status);
4575 mem_ref = ptr;
4576 mem_end = mem_ref + 9;
4577 if (val < 0) {
4578 stb(mem_end, 0x80);
4579 val = -val;
4580 } else {
4581 stb(mem_end, 0x00);
4582 }
4583 while (mem_ref < mem_end) {
4584 if (val == 0)
4585 break;
4586 v = val % 100;
4587 val = val / 100;
4588 v = ((v / 10) << 4) | (v % 10);
4589 stb(mem_ref++, v);
4590 }
4591 while (mem_ref < mem_end) {
4592 stb(mem_ref++, 0);
4593 }
4594}
4595
4596void helper_f2xm1(void)
4597{
4598 ST0 = pow(2.0,ST0) - 1.0;
4599}
4600
4601void helper_fyl2x(void)
4602{
4603 CPU86_LDouble fptemp;
4604
4605 fptemp = ST0;
4606 if (fptemp>0.0){
4607 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4608 ST1 *= fptemp;
4609 fpop();
4610 } else {
4611 env->fpus &= (~0x4700);
4612 env->fpus |= 0x400;
4613 }
4614}
4615
4616void helper_fptan(void)
4617{
4618 CPU86_LDouble fptemp;
4619
4620 fptemp = ST0;
4621 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4622 env->fpus |= 0x400;
4623 } else {
4624 ST0 = tan(fptemp);
4625 fpush();
4626 ST0 = 1.0;
4627 env->fpus &= (~0x400); /* C2 <-- 0 */
4628 /* the above code is for |arg| < 2**52 only */
4629 }
4630}
4631
4632void helper_fpatan(void)
4633{
4634 CPU86_LDouble fptemp, fpsrcop;
4635
4636 fpsrcop = ST1;
4637 fptemp = ST0;
4638 ST1 = atan2(fpsrcop,fptemp);
4639 fpop();
4640}
4641
4642void helper_fxtract(void)
4643{
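 /* split ST0 into exponent and significand: the unbiased exponent replaces
 ST0, then the significand (with its exponent rebiased to zero) is pushed
 on top of it */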
4644 CPU86_LDoubleU temp;
4645 unsigned int expdif;
4646
4647 temp.d = ST0;
4648 expdif = EXPD(temp) - EXPBIAS;
4649 /*DP exponent bias*/
4650 ST0 = expdif;
4651 fpush();
4652 BIASEXPONENT(temp);
4653 ST0 = temp.d;
4654}
4655
4656#ifdef VBOX
4657#ifdef _MSC_VER
4658/* MSC rejects the constant expression 0.0 / 0.0, so use an external NaN instead. */
4659extern double _Nan;
4660#define NaN _Nan
4661#else
4662#define NaN (0.0 / 0.0)
4663#endif
4664#endif /* VBOX */
4665
4666void helper_fprem1(void)
4667{
4668 CPU86_LDouble dblq, fpsrcop, fptemp;
4669 CPU86_LDoubleU fpsrcop1, fptemp1;
4670 int expdif;
4671 signed long long int q;
4672
4673#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4674 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4675#else
4676 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4677#endif
4678 ST0 = 0.0 / 0.0; /* NaN */
4679 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4680 return;
4681 }
4682
4683 fpsrcop = ST0;
4684 fptemp = ST1;
4685 fpsrcop1.d = fpsrcop;
4686 fptemp1.d = fptemp;
4687 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4688
4689 if (expdif < 0) {
4690 /* optimisation? taken from the AMD docs */
4691 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4692 /* ST0 is unchanged */
4693 return;
4694 }
4695
4696 if (expdif < 53) {
4697 dblq = fpsrcop / fptemp;
4698 /* round dblq towards nearest integer */
4699 dblq = rint(dblq);
4700 ST0 = fpsrcop - fptemp * dblq;
4701
4702 /* convert dblq to q by truncating towards zero */
4703 if (dblq < 0.0)
4704 q = (signed long long int)(-dblq);
4705 else
4706 q = (signed long long int)dblq;
4707
4708 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4709 /* (C0,C3,C1) <-- (q2,q1,q0) */
4710 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4711 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4712 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4713 } else {
4714 env->fpus |= 0x400; /* C2 <-- 1 */
4715 fptemp = pow(2.0, expdif - 50);
4716 fpsrcop = (ST0 / ST1) / fptemp;
4717 /* fpsrcop = integer obtained by chopping */
4718 fpsrcop = (fpsrcop < 0.0) ?
4719 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4720 ST0 -= (ST1 * fpsrcop * fptemp);
4721 }
4722}
4723
4724void helper_fprem(void)
4725{
4726 CPU86_LDouble dblq, fpsrcop, fptemp;
4727 CPU86_LDoubleU fpsrcop1, fptemp1;
4728 int expdif;
4729 signed long long int q;
4730
4731#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4732 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4733#else
4734 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4735#endif
4736 ST0 = 0.0 / 0.0; /* NaN */
4737 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4738 return;
4739 }
4740
4741 fpsrcop = (CPU86_LDouble)ST0;
4742 fptemp = (CPU86_LDouble)ST1;
4743 fpsrcop1.d = fpsrcop;
4744 fptemp1.d = fptemp;
4745 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4746
4747 if (expdif < 0) {
4748 /* optimisation? taken from the AMD docs */
4749 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4750 /* ST0 is unchanged */
4751 return;
4752 }
4753
4754 if ( expdif < 53 ) {
4755 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4756 /* round dblq towards zero */
4757 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4758 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4759
4760 /* convert dblq to q by truncating towards zero */
4761 if (dblq < 0.0)
4762 q = (signed long long int)(-dblq);
4763 else
4764 q = (signed long long int)dblq;
4765
4766 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4767 /* (C0,C3,C1) <-- (q2,q1,q0) */
4768 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4769 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4770 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4771 } else {
4772 int N = 32 + (expdif % 32); /* as per AMD docs */
4773 env->fpus |= 0x400; /* C2 <-- 1 */
4774 fptemp = pow(2.0, (double)(expdif - N));
4775 fpsrcop = (ST0 / ST1) / fptemp;
4776 /* fpsrcop = integer obtained by chopping */
4777 fpsrcop = (fpsrcop < 0.0) ?
4778 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4779 ST0 -= (ST1 * fpsrcop * fptemp);
4780 }
4781}
4782
4783void helper_fyl2xp1(void)
4784{
4785 CPU86_LDouble fptemp;
4786
4787 fptemp = ST0;
4788 if ((fptemp+1.0)>0.0) {
4789 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4790 ST1 *= fptemp;
4791 fpop();
4792 } else {
4793 env->fpus &= (~0x4700);
4794 env->fpus |= 0x400;
4795 }
4796}
4797
4798void helper_fsqrt(void)
4799{
4800 CPU86_LDouble fptemp;
4801
4802 fptemp = ST0;
4803 if (fptemp<0.0) {
4804 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4805 env->fpus |= 0x400;
4806 }
4807 ST0 = sqrt(fptemp);
4808}
4809
4810void helper_fsincos(void)
4811{
4812 CPU86_LDouble fptemp;
4813
4814 fptemp = ST0;
4815 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4816 env->fpus |= 0x400;
4817 } else {
4818 ST0 = sin(fptemp);
4819 fpush();
4820 ST0 = cos(fptemp);
4821 env->fpus &= (~0x400); /* C2 <-- 0 */
4822 /* the above code is for |arg| < 2**63 only */
4823 }
4824}
4825
4826void helper_frndint(void)
4827{
4828 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4829}
4830
4831void helper_fscale(void)
4832{
4833 ST0 = ldexp (ST0, (int)(ST1));
4834}
4835
4836void helper_fsin(void)
4837{
4838 CPU86_LDouble fptemp;
4839
4840 fptemp = ST0;
4841 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4842 env->fpus |= 0x400;
4843 } else {
4844 ST0 = sin(fptemp);
4845 env->fpus &= (~0x400); /* C2 <-- 0 */
4846 /* the above code is for |arg| < 2**53 only */
4847 }
4848}
4849
4850void helper_fcos(void)
4851{
4852 CPU86_LDouble fptemp;
4853
4854 fptemp = ST0;
4855 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4856 env->fpus |= 0x400;
4857 } else {
4858 ST0 = cos(fptemp);
4859 env->fpus &= (~0x400); /* C2 <-- 0 */
4860 /* the above code is for |arg| < 2**63 only */
4861 }
4862}
4863
4864void helper_fxam_ST0(void)
4865{
4866 CPU86_LDoubleU temp;
4867 int expdif;
4868
4869 temp.d = ST0;
4870
4871 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4872 if (SIGND(temp))
4873 env->fpus |= 0x200; /* C1 <-- 1 */
4874
4875 /* XXX: test fptags too */
4876 expdif = EXPD(temp);
4877 if (expdif == MAXEXPD) {
4878#ifdef USE_X86LDOUBLE
4879 if (MANTD(temp) == 0x8000000000000000ULL)
4880#else
4881 if (MANTD(temp) == 0)
4882#endif
4883 env->fpus |= 0x500 /*Infinity*/;
4884 else
4885 env->fpus |= 0x100 /*NaN*/;
4886 } else if (expdif == 0) {
4887 if (MANTD(temp) == 0)
4888 env->fpus |= 0x4000 /*Zero*/;
4889 else
4890 env->fpus |= 0x4400 /*Denormal*/;
4891 } else {
4892 env->fpus |= 0x400;
4893 }
4894}
4895
4896void helper_fstenv(target_ulong ptr, int data32)
4897{
4898 int fpus, fptag, exp, i;
4899 uint64_t mant;
4900 CPU86_LDoubleU tmp;
4901
4902 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4903 fptag = 0;
4904 for (i=7; i>=0; i--) {
4905 fptag <<= 2;
4906 if (env->fptags[i]) {
4907 fptag |= 3;
4908 } else {
4909 tmp.d = env->fpregs[i].d;
4910 exp = EXPD(tmp);
4911 mant = MANTD(tmp);
4912 if (exp == 0 && mant == 0) {
4913 /* zero */
4914 fptag |= 1;
4915 } else if (exp == 0 || exp == MAXEXPD
4916#ifdef USE_X86LDOUBLE
4917 || (mant & (1LL << 63)) == 0
4918#endif
4919 ) {
4920 /* NaNs, infinity, denormal */
4921 fptag |= 2;
4922 }
4923 }
4924 }
4925 if (data32) {
4926 /* 32 bit */
4927 stl(ptr, env->fpuc);
4928 stl(ptr + 4, fpus);
4929 stl(ptr + 8, fptag);
4930 stl(ptr + 12, 0); /* fpip */
4931 stl(ptr + 16, 0); /* fpcs */
4932 stl(ptr + 20, 0); /* fpoo */
4933 stl(ptr + 24, 0); /* fpos */
4934 } else {
4935 /* 16 bit */
4936 stw(ptr, env->fpuc);
4937 stw(ptr + 2, fpus);
4938 stw(ptr + 4, fptag);
4939 stw(ptr + 6, 0);
4940 stw(ptr + 8, 0);
4941 stw(ptr + 10, 0);
4942 stw(ptr + 12, 0);
4943 }
4944}
4945
4946void helper_fldenv(target_ulong ptr, int data32)
4947{
4948 int i, fpus, fptag;
4949
4950 if (data32) {
4951 env->fpuc = lduw(ptr);
4952 fpus = lduw(ptr + 4);
4953 fptag = lduw(ptr + 8);
4954 }
4955 else {
4956 env->fpuc = lduw(ptr);
4957 fpus = lduw(ptr + 2);
4958 fptag = lduw(ptr + 4);
4959 }
4960 env->fpstt = (fpus >> 11) & 7;
4961 env->fpus = fpus & ~0x3800;
4962 for(i = 0;i < 8; i++) {
4963 env->fptags[i] = ((fptag & 3) == 3);
4964 fptag >>= 2;
4965 }
4966}
4967
4968void helper_fsave(target_ulong ptr, int data32)
4969{
4970 CPU86_LDouble tmp;
4971 int i;
4972
4973 helper_fstenv(ptr, data32);
4974
4975 ptr += (14 << data32);
4976 for(i = 0;i < 8; i++) {
4977 tmp = ST(i);
4978 helper_fstt(tmp, ptr);
4979 ptr += 10;
4980 }
4981
4982 /* fninit */
4983 env->fpus = 0;
4984 env->fpstt = 0;
4985 env->fpuc = 0x37f;
4986 env->fptags[0] = 1;
4987 env->fptags[1] = 1;
4988 env->fptags[2] = 1;
4989 env->fptags[3] = 1;
4990 env->fptags[4] = 1;
4991 env->fptags[5] = 1;
4992 env->fptags[6] = 1;
4993 env->fptags[7] = 1;
4994}
4995
4996void helper_frstor(target_ulong ptr, int data32)
4997{
4998 CPU86_LDouble tmp;
4999 int i;
5000
5001 helper_fldenv(ptr, data32);
5002 ptr += (14 << data32);
5003
5004 for(i = 0;i < 8; i++) {
5005 tmp = helper_fldt(ptr);
5006 ST(i) = tmp;
5007 ptr += 10;
5008 }
5009}
5010
5011void helper_fxsave(target_ulong ptr, int data64)
5012{
5013 int fpus, fptag, i, nb_xmm_regs;
5014 CPU86_LDouble tmp;
5015 target_ulong addr;
5016
5017 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5018 fptag = 0;
5019 for(i = 0; i < 8; i++) {
5020 fptag |= (env->fptags[i] << i);
5021 }
5022 stw(ptr, env->fpuc);
5023 stw(ptr + 2, fpus);
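 /* FXSAVE stores an abridged tag word: one bit per register, 1 = valid;
 env->fptags uses 1 = empty, hence the XOR */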
5024 stw(ptr + 4, fptag ^ 0xff);
5025#ifdef TARGET_X86_64
5026 if (data64) {
5027 stq(ptr + 0x08, 0); /* rip */
5028 stq(ptr + 0x10, 0); /* rdp */
5029 } else
5030#endif
5031 {
5032 stl(ptr + 0x08, 0); /* eip */
5033 stl(ptr + 0x0c, 0); /* sel */
5034 stl(ptr + 0x10, 0); /* dp */
5035 stl(ptr + 0x14, 0); /* sel */
5036 }
5037
5038 addr = ptr + 0x20;
5039 for(i = 0;i < 8; i++) {
5040 tmp = ST(i);
5041 helper_fstt(tmp, addr);
5042 addr += 16;
5043 }
5044
5045 if (env->cr[4] & CR4_OSFXSR_MASK) {
5046 /* XXX: finish it */
5047 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5048 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5049 if (env->hflags & HF_CS64_MASK)
5050 nb_xmm_regs = 16;
5051 else
5052 nb_xmm_regs = 8;
5053 addr = ptr + 0xa0;
5054 for(i = 0; i < nb_xmm_regs; i++) {
5055 stq(addr, env->xmm_regs[i].XMM_Q(0));
5056 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5057 addr += 16;
5058 }
5059 }
5060}
5061
5062void helper_fxrstor(target_ulong ptr, int data64)
5063{
5064 int i, fpus, fptag, nb_xmm_regs;
5065 CPU86_LDouble tmp;
5066 target_ulong addr;
5067
5068 env->fpuc = lduw(ptr);
5069 fpus = lduw(ptr + 2);
5070 fptag = lduw(ptr + 4);
5071 env->fpstt = (fpus >> 11) & 7;
5072 env->fpus = fpus & ~0x3800;
5073 fptag ^= 0xff;
5074 for(i = 0;i < 8; i++) {
5075 env->fptags[i] = ((fptag >> i) & 1);
5076 }
5077
5078 addr = ptr + 0x20;
5079 for(i = 0;i < 8; i++) {
5080 tmp = helper_fldt(addr);
5081 ST(i) = tmp;
5082 addr += 16;
5083 }
5084
5085 if (env->cr[4] & CR4_OSFXSR_MASK) {
5086 /* XXX: finish it */
5087 env->mxcsr = ldl(ptr + 0x18);
5088 //ldl(ptr + 0x1c);
5089 if (env->hflags & HF_CS64_MASK)
5090 nb_xmm_regs = 16;
5091 else
5092 nb_xmm_regs = 8;
5093 addr = ptr + 0xa0;
5094 for(i = 0; i < nb_xmm_regs; i++) {
5095#if !defined(VBOX) || __GNUC__ < 4
5096 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5097 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5098#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5099# if 1
5100 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5101 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5102 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5103 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5104# else
5105 /* this works fine on Mac OS X, gcc 4.0.1 */
5106 uint64_t u64 = ldq(addr);
5107 env->xmm_regs[i].XMM_Q(0) = u64;
5108 u64 = ldq(addr + 8);
5109 env->xmm_regs[i].XMM_Q(1) = u64;
5110# endif
5111#endif
5112 addr += 16;
5113 }
5114 }
5115}
5116
5117#ifndef USE_X86LDOUBLE
5118
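/* Without a host 80-bit long double, convert between the guest's extended
   format (explicit integer bit, 64-bit mantissa, exponent biased by 16383)
   and the host IEEE double (52-bit mantissa, exponent biased by EXPBIAS). */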
5119void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5120{
5121 CPU86_LDoubleU temp;
5122 int e;
5123
5124 temp.d = f;
5125 /* mantissa */
5126 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5127 /* exponent + sign */
5128 e = EXPD(temp) - EXPBIAS + 16383;
5129 e |= SIGND(temp) >> 16;
5130 *pexp = e;
5131}
5132
5133CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5134{
5135 CPU86_LDoubleU temp;
5136 int e;
5137 uint64_t ll;
5138
5139 /* XXX: handle overflow ? */
5140 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5141 e |= (upper >> 4) & 0x800; /* sign */
5142 ll = (mant >> 11) & ((1LL << 52) - 1);
5143#ifdef __arm__
5144 temp.l.upper = (e << 20) | (ll >> 32);
5145 temp.l.lower = ll;
5146#else
5147 temp.ll = ll | ((uint64_t)e << 52);
5148#endif
5149 return temp.d;
5150}
5151
5152#else
5153
5154void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5155{
5156 CPU86_LDoubleU temp;
5157
5158 temp.d = f;
5159 *pmant = temp.l.lower;
5160 *pexp = temp.l.upper;
5161}
5162
5163CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5164{
5165 CPU86_LDoubleU temp;
5166
5167 temp.l.upper = upper;
5168 temp.l.lower = mant;
5169 return temp.d;
5170}
5171#endif
5172
5173#ifdef TARGET_X86_64
5174
5175//#define DEBUG_MULDIV
5176
5177static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5178{
5179 *plow += a;
5180 /* carry test */
5181 if (*plow < a)
5182 (*phigh)++;
5183 *phigh += b;
5184}
5185
5186static void neg128(uint64_t *plow, uint64_t *phigh)
5187{
5188 *plow = ~ *plow;
5189 *phigh = ~ *phigh;
5190 add128(plow, phigh, 1, 0);
5191}
5192
5193/* return TRUE if overflow */
5194static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5195{
5196 uint64_t q, r, a1, a0;
5197 int i, qb, ab;
5198
5199 a0 = *plow;
5200 a1 = *phigh;
5201 if (a1 == 0) {
5202 q = a0 / b;
5203 r = a0 % b;
5204 *plow = q;
5205 *phigh = r;
5206 } else {
5207 if (a1 >= b)
5208 return 1;
5209 /* XXX: use a better algorithm */
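 /* restoring shift-and-subtract division: shift the 128-bit dividend
 left one bit at a time, subtracting b whenever possible; after 64
 iterations a0 holds the quotient and a1 the remainder */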
5210 for(i = 0; i < 64; i++) {
5211 ab = a1 >> 63;
5212 a1 = (a1 << 1) | (a0 >> 63);
5213 if (ab || a1 >= b) {
5214 a1 -= b;
5215 qb = 1;
5216 } else {
5217 qb = 0;
5218 }
5219 a0 = (a0 << 1) | qb;
5220 }
5221#if defined(DEBUG_MULDIV)
5222 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5223 *phigh, *plow, b, a0, a1);
5224#endif
5225 *plow = a0;
5226 *phigh = a1;
5227 }
5228 return 0;
5229}
5230
5231/* return TRUE if overflow */
5232static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5233{
5234 int sa, sb;
5235 sa = ((int64_t)*phigh < 0);
5236 if (sa)
5237 neg128(plow, phigh);
5238 sb = (b < 0);
5239 if (sb)
5240 b = -b;
5241 if (div64(plow, phigh, b) != 0)
5242 return 1;
5243 if (sa ^ sb) {
5244 if (*plow > (1ULL << 63))
5245 return 1;
5246 *plow = - *plow;
5247 } else {
5248 if (*plow >= (1ULL << 63))
5249 return 1;
5250 }
5251 if (sa)
5252 *phigh = - *phigh;
5253 return 0;
5254}
5255
5256void helper_mulq_EAX_T0(target_ulong t0)
5257{
5258 uint64_t r0, r1;
5259
5260 mulu64(&r0, &r1, EAX, t0);
5261 EAX = r0;
5262 EDX = r1;
5263 CC_DST = r0;
5264 CC_SRC = r1;
5265}
5266
5267void helper_imulq_EAX_T0(target_ulong t0)
5268{
5269 uint64_t r0, r1;
5270
5271 muls64(&r0, &r1, EAX, t0);
5272 EAX = r0;
5273 EDX = r1;
5274 CC_DST = r0;
5275 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5276}
5277
5278target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5279{
5280 uint64_t r0, r1;
5281
5282 muls64(&r0, &r1, t0, t1);
5283 CC_DST = r0;
5284 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5285 return r0;
5286}
5287
5288void helper_divq_EAX(target_ulong t0)
5289{
5290 uint64_t r0, r1;
5291 if (t0 == 0) {
5292 raise_exception(EXCP00_DIVZ);
5293 }
5294 r0 = EAX;
5295 r1 = EDX;
5296 if (div64(&r0, &r1, t0))
5297 raise_exception(EXCP00_DIVZ);
5298 EAX = r0;
5299 EDX = r1;
5300}
5301
5302void helper_idivq_EAX(target_ulong t0)
5303{
5304 uint64_t r0, r1;
5305 if (t0 == 0) {
5306 raise_exception(EXCP00_DIVZ);
5307 }
5308 r0 = EAX;
5309 r1 = EDX;
5310 if (idiv64(&r0, &r1, t0))
5311 raise_exception(EXCP00_DIVZ);
5312 EAX = r0;
5313 EDX = r1;
5314}
5315#endif
5316
5317static void do_hlt(void)
5318{
5319 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5320 env->halted = 1;
5321 env->exception_index = EXCP_HLT;
5322 cpu_loop_exit();
5323}
5324
5325void helper_hlt(int next_eip_addend)
5326{
5327 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5328 EIP += next_eip_addend;
5329
5330 do_hlt();
5331}
5332
5333void helper_monitor(target_ulong ptr)
5334{
5335 if ((uint32_t)ECX != 0)
5336 raise_exception(EXCP0D_GPF);
5337 /* XXX: store address ? */
5338 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5339}
5340
5341void helper_mwait(int next_eip_addend)
5342{
5343 if ((uint32_t)ECX != 0)
5344 raise_exception(EXCP0D_GPF);
5345#ifdef VBOX
5346 helper_hlt(next_eip_addend);
5347#else
5348 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5349 EIP += next_eip_addend;
5350
5351 /* XXX: not complete but not completely erroneous */
5352 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5353 /* more than one CPU: do not sleep because another CPU may
5354 wake this one */
5355 } else {
5356 do_hlt();
5357 }
5358#endif
5359}
5360
5361void helper_debug(void)
5362{
5363 env->exception_index = EXCP_DEBUG;
5364 cpu_loop_exit();
5365}
5366
5367void helper_raise_interrupt(int intno, int next_eip_addend)
5368{
5369 raise_interrupt(intno, 1, 0, next_eip_addend);
5370}
5371
5372void helper_raise_exception(int exception_index)
5373{
5374 raise_exception(exception_index);
5375}
5376
5377void helper_cli(void)
5378{
5379 env->eflags &= ~IF_MASK;
5380}
5381
5382void helper_sti(void)
5383{
5384 env->eflags |= IF_MASK;
5385}
5386
5387#ifdef VBOX
5388void helper_cli_vme(void)
5389{
5390 env->eflags &= ~VIF_MASK;
5391}
5392
5393void helper_sti_vme(void)
5394{
5395 /* First check, then change eflags according to the AMD manual */
5396 if (env->eflags & VIP_MASK) {
5397 raise_exception(EXCP0D_GPF);
5398 }
5399 env->eflags |= VIF_MASK;
5400}
5401#endif
5402
5403#if 0
5404/* vm86plus instructions */
5405void helper_cli_vm(void)
5406{
5407 env->eflags &= ~VIF_MASK;
5408}
5409
5410void helper_sti_vm(void)
5411{
5412 env->eflags |= VIF_MASK;
5413 if (env->eflags & VIP_MASK) {
5414 raise_exception(EXCP0D_GPF);
5415 }
5416}
5417#endif
5418
5419void helper_set_inhibit_irq(void)
5420{
5421 env->hflags |= HF_INHIBIT_IRQ_MASK;
5422}
5423
5424void helper_reset_inhibit_irq(void)
5425{
5426 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5427}
5428
5429void helper_boundw(target_ulong a0, int v)
5430{
5431 int low, high;
5432 low = ldsw(a0);
5433 high = ldsw(a0 + 2);
5434 v = (int16_t)v;
5435 if (v < low || v > high) {
5436 raise_exception(EXCP05_BOUND);
5437 }
5438 FORCE_RET();
5439}
5440
5441void helper_boundl(target_ulong a0, int v)
5442{
5443 int low, high;
5444 low = ldl(a0);
5445 high = ldl(a0 + 4);
5446 if (v < low || v > high) {
5447 raise_exception(EXCP05_BOUND);
5448 }
5449 FORCE_RET();
5450}
5451
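/* Approximation helpers, presumably used by the SSE reciprocal (RCP*) and
   reciprocal square root (RSQRT*) ops; computed at full precision here. */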
5452static float approx_rsqrt(float a)
5453{
5454 return 1.0 / sqrt(a);
5455}
5456
5457static float approx_rcp(float a)
5458{
5459 return 1.0 / a;
5460}
5461
5462#if !defined(CONFIG_USER_ONLY)
5463
5464#define MMUSUFFIX _mmu
5465
5466#define SHIFT 0
5467#include "softmmu_template.h"
5468
5469#define SHIFT 1
5470#include "softmmu_template.h"
5471
5472#define SHIFT 2
5473#include "softmmu_template.h"
5474
5475#define SHIFT 3
5476#include "softmmu_template.h"
5477
5478#endif
5479
5480#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5481/* This code assumes that real physical addresses always fit into a host CPU register,
5482 which is wrong in general, but true for our current use cases. */
5483RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5484{
5485 return remR3PhysReadS8(addr);
5486}
5487RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5488{
5489 return remR3PhysReadU8(addr);
5490}
5491void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5492{
5493 remR3PhysWriteU8(addr, val);
5494}
5495RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5496{
5497 return remR3PhysReadS16(addr);
5498}
5499RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5500{
5501 return remR3PhysReadU16(addr);
5502}
5503void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5504{
5505 remR3PhysWriteU16(addr, val);
5506}
5507RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5508{
5509 return remR3PhysReadS32(addr);
5510}
5511RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5512{
5513 return remR3PhysReadU32(addr);
5514}
5515void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5516{
5517 remR3PhysWriteU32(addr, val);
5518}
5519uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5520{
5521 return remR3PhysReadU64(addr);
5522}
5523void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5524{
5525 remR3PhysWriteU64(addr, val);
5526}
5527#endif
5528
5529/* try to fill the TLB and return an exception if error. If retaddr is
5530 NULL, it means that the function was called in C code (i.e. not
5531 from generated code or from helper.c) */
5532/* XXX: fix it to restore all registers */
5533void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5534{
5535 TranslationBlock *tb;
5536 int ret;
5537 unsigned long pc;
5538 CPUX86State *saved_env;
5539
5540 /* XXX: hack to restore env in all cases, even if not called from
5541 generated code */
5542 saved_env = env;
5543 env = cpu_single_env;
5544
5545 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5546 if (ret) {
5547 if (retaddr) {
5548 /* now we have a real cpu fault */
5549 pc = (unsigned long)retaddr;
5550 tb = tb_find_pc(pc);
5551 if (tb) {
5552 /* the PC is inside the translated code. It means that we have
5553 a virtual CPU fault */
5554 cpu_restore_state(tb, env, pc, NULL);
5555 }
5556 }
5557 raise_exception_err(env->exception_index, env->error_code);
5558 }
5559 env = saved_env;
5560}
5561
5562#ifdef VBOX
5563
5564/**
5565 * Correctly computes the eflags.
5566 * @returns eflags.
5567 * @param env1 CPU environment.
5568 */
5569uint32_t raw_compute_eflags(CPUX86State *env1)
5570{
5571 CPUX86State *savedenv = env;
5572 uint32_t efl;
5573 env = env1;
5574 efl = compute_eflags();
5575 env = savedenv;
5576 return efl;
5577}
5578
5579/**
5580 * Reads a byte from a virtual address in the guest memory area.
5581 * XXX: does this work for all addresses? swapped out pages?
5582 * @returns read data byte.
5583 * @param env1 CPU environment.
5584 * @param addr GC virtual address.
5585 */
5586uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5587{
5588 CPUX86State *savedenv = env;
5589 uint8_t u8;
5590 env = env1;
5591 u8 = ldub_kernel(addr);
5592 env = savedenv;
5593 return u8;
5594}
5595
5596/**
5597 * Reads a word from a virtual address in the guest memory area.
5598 * XXX: does this work for all addresses? swapped out pages?
5599 * @returns read data word.
5600 * @param env1 CPU environment.
5601 * @param addr GC virtual address.
5602 */
5603uint16_t read_word(CPUX86State *env1, target_ulong addr)
5604{
5605 CPUX86State *savedenv = env;
5606 uint16_t u16;
5607 env = env1;
5608 u16 = lduw_kernel(addr);
5609 env = savedenv;
5610 return u16;
5611}
5612
5613/**
5614 * Reads a dword from a virtual address in the guest memory area.
5615 * XXX: does this work for all addresses? swapped out pages?
5616 * @returns read data dword.
5617 * @param env1 CPU environment.
5618 * @param addr GC virtual address.
5619 */
5620uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5621{
5622 CPUX86State *savedenv = env;
5623 uint32_t u32;
5624 env = env1;
5625 u32 = ldl_kernel(addr);
5626 env = savedenv;
5627 return u32;
5628}
5629
5630/**
5631 * Writes a byte to a virtual address in the guest memory area.
5632 * XXX: does this work for all addresses? swapped out pages?
5634 * @param env1 CPU environment.
5635 * @param addr GC virtual address.
5636 * @param val byte value.
5637 */
5638void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5639{
5640 CPUX86State *savedenv = env;
5641 env = env1;
5642 stb(addr, val);
5643 env = savedenv;
5644}
5645
5646void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5647{
5648 CPUX86State *savedenv = env;
5649 env = env1;
5650 stw(addr, val);
5651 env = savedenv;
5652}
5653
5654void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5655{
5656 CPUX86State *savedenv = env;
5657 env = env1;
5658 stl(addr, val);
5659 env = savedenv;
5660}
5661
5662/**
5663 * Correctly loads selector into segment register with updating internal
5664 * qemu data/caches.
5665 * @param env1 CPU environment.
5666 * @param seg_reg Segment register.
5667 * @param selector Selector to load.
5668 */
5669void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5670{
5671 CPUX86State *savedenv = env;
5672 jmp_buf old_buf;
5673
5674 env = env1;
5675
5676 if ( env->eflags & X86_EFL_VM
5677 || !(env->cr[0] & X86_CR0_PE))
5678 {
5679 load_seg_vm(seg_reg, selector);
5680
5681 env = savedenv;
5682
5683 /* Successful sync. */
5684 env1->segs[seg_reg].newselector = 0;
5685 }
5686 else
5687 {
5688 /* For some reason this works even without saving/restoring the jump buffer, and since this
5689 code is time critical we normally skip it (only done when FORCE_SEGMENT_SYNC is defined). */
5690#ifdef FORCE_SEGMENT_SYNC
5691 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5692#endif
5693 if (setjmp(env1->jmp_env) == 0)
5694 {
5695 if (seg_reg == R_CS)
5696 {
5697 uint32_t e1, e2;
5698 e1 = e2 = 0;
5699 load_segment(&e1, &e2, selector);
5700 cpu_x86_load_seg_cache(env, R_CS, selector,
5701 get_seg_base(e1, e2),
5702 get_seg_limit(e1, e2),
5703 e2);
5704 }
5705 else
5706 helper_load_seg(seg_reg, selector);
5707 /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5708 the loading of 0 selectors, which in turn led to subtle problems like #3588. */
5709
5710 env = savedenv;
5711
5712 /* Successful sync. */
5713 env1->segs[seg_reg].newselector = 0;
5714 }
5715 else
5716 {
5717 env = savedenv;
5718
5719 /* Postpone sync until the guest uses the selector. */
5720 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5721 env1->segs[seg_reg].newselector = selector;
5722 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5723 env1->exception_index = -1;
5724 env1->error_code = 0;
5725 env1->old_exception = -1;
5726 }
5727#ifdef FORCE_SEGMENT_SYNC
5728 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5729#endif
5730 }
5731
5732}
5733
5734DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5735{
5736 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
5737}
5738
5739
5740int emulate_single_instr(CPUX86State *env1)
5741{
5742 TranslationBlock *tb;
5743 TranslationBlock *current;
5744 int flags;
5745 uint8_t *tc_ptr;
5746 target_ulong old_eip;
5747
5748 /* ensures env is loaded! */
5749 CPUX86State *savedenv = env;
5750 env = env1;
5751
5752 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
5753
5754 current = env->current_tb;
5755 env->current_tb = NULL;
5756 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
5757
5758 /*
5759 * Translate only one instruction.
5760 */
5761 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
5762 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
5763 env->segs[R_CS].base, flags, 0);
5764
5765 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
5766
5767
5768 /* tb_link_phys: */
5769 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
5770 tb->jmp_next[0] = NULL;
5771 tb->jmp_next[1] = NULL;
5772 Assert(tb->jmp_next[0] == NULL);
5773 Assert(tb->jmp_next[1] == NULL);
5774 if (tb->tb_next_offset[0] != 0xffff)
5775 tb_reset_jump(tb, 0);
5776 if (tb->tb_next_offset[1] != 0xffff)
5777 tb_reset_jump(tb, 1);
5778
5779 /*
5780 * Execute it using emulation
5781 */
5782 old_eip = env->eip;
5783 env->current_tb = tb;
5784
5785 /*
5786 * eip remains the same for repeated (REP-prefixed) instructions; it is unclear why qemu
5787 * doesn't simply jump back inside the generated code. Perhaps not a very safe hack.
5788 */
5789 while(old_eip == env->eip)
5790 {
5791 tc_ptr = tb->tc_ptr;
5792
5793#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
5794 int fake_ret;
5795 tcg_qemu_tb_exec(tc_ptr, fake_ret);
5796#else
5797 tcg_qemu_tb_exec(tc_ptr);
5798#endif
5799 /*
5800 * Exit once we detect an external interrupt and interrupts are enabled
5801 */
5802 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
5803 ( (env->eflags & IF_MASK) &&
5804 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
5805 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
5806 {
5807 break;
5808 }
5809 }
5810 env->current_tb = current;
5811
5812 tb_phys_invalidate(tb, -1);
5813 tb_free(tb);
5814/*
5815 Assert(tb->tb_next_offset[0] == 0xffff);
5816 Assert(tb->tb_next_offset[1] == 0xffff);
5817 Assert(tb->tb_next[0] == 0xffff);
5818 Assert(tb->tb_next[1] == 0xffff);
5819 Assert(tb->jmp_next[0] == NULL);
5820 Assert(tb->jmp_next[1] == NULL);
5821 Assert(tb->jmp_first == NULL); */
5822
5823 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
5824
5825 /*
5826 * Execute the next instruction when we encounter instruction fusing.
5827 */
5828 if (env->hflags & HF_INHIBIT_IRQ_MASK)
5829 {
5830 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
5831 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5832 emulate_single_instr(env);
5833 }
5834
5835 env = savedenv;
5836 return 0;
5837}
5838
5839/**
5840 * Correctly loads a new ldtr selector.
5841 *
5842 * @param env1 CPU environment.
5843 * @param selector Selector to load.
5844 */
5845void sync_ldtr(CPUX86State *env1, int selector)
5846{
5847 CPUX86State *saved_env = env;
5848 if (setjmp(env1->jmp_env) == 0)
5849 {
5850 env = env1;
5851 helper_lldt(selector);
5852 env = saved_env;
5853 }
5854 else
5855 {
5856 env = saved_env;
5857#ifdef VBOX_STRICT
5858 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
5859#endif
5860 }
5861}
5862
5863/**
5864 * Correctly loads a new tr selector.
5865 *
5866 * @param env1 CPU environment.
5867 * @param selector Selector to load.
5868 */
5869int sync_tr(CPUX86State *env1, int selector)
5870{
5871 /* ARG! this was going to call helper_ltr_T0 but that won't work because of busy flag. */
5872 SegmentCache *dt;
5873 uint32_t e1, e2;
5874 int index, type, entry_limit;
5875 target_ulong ptr;
5876 CPUX86State *saved_env = env;
5877 env = env1;
5878
5879 selector &= 0xffff;
5880 if ((selector & 0xfffc) == 0) {
5881 /* NULL selector case: invalid TR */
5882 env->tr.base = 0;
5883 env->tr.limit = 0;
5884 env->tr.flags = 0;
5885 } else {
5886 if (selector & 0x4)
5887 goto l_failure;
5888 dt = &env->gdt;
5889 index = selector & ~7;
5890#ifdef TARGET_X86_64
5891 if (env->hflags & HF_LMA_MASK)
5892 entry_limit = 15;
5893 else
5894#endif
5895 entry_limit = 7;
5896 if ((index + entry_limit) > dt->limit)
5897 goto l_failure;
5898 ptr = dt->base + index;
5899 e1 = ldl_kernel(ptr);
5900 e2 = ldl_kernel(ptr + 4);
5901 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5902 if ((e2 & DESC_S_MASK) /*||
5903 (type != 1 && type != 9)*/)
5904 goto l_failure;
5905 if (!(e2 & DESC_P_MASK))
5906 goto l_failure;
5907#ifdef TARGET_X86_64
5908 if (env->hflags & HF_LMA_MASK) {
5909 uint32_t e3;
5910 e3 = ldl_kernel(ptr + 8);
5911 load_seg_cache_raw_dt(&env->tr, e1, e2);
5912 env->tr.base |= (target_ulong)e3 << 32;
5913 } else
5914#endif
5915 {
5916 load_seg_cache_raw_dt(&env->tr, e1, e2);
5917 }
5918 e2 |= DESC_TSS_BUSY_MASK;
5919 stl_kernel(ptr + 4, e2);
5920 }
5921 env->tr.selector = selector;
5922
5923 env = saved_env;
5924 return 0;
5925l_failure:
 env = saved_env;
5926 AssertMsgFailed(("selector=%d\n", selector));
5927 return -1;
5928}
5929
5930
5931int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
5932 uint32_t *esp_ptr, int dpl)
5933{
5934 int type, index, shift;
5935
5936 CPUX86State *savedenv = env;
5937 env = env1;
5938
5939 if (!(env->tr.flags & DESC_P_MASK))
5940 cpu_abort(env, "invalid tss");
5941 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
5942 if ((type & 7) != 1)
5943 cpu_abort(env, "invalid tss type %d", type);
5944 shift = type >> 3;
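 /* the ring N stack pointer lives at offset 4 + N*8 in a 32-bit TSS
 (2 + N*4 in a 16-bit TSS), with SS stored immediately after ESP */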
5945 index = (dpl * 4 + 2) << shift;
5946 if (index + (4 << shift) - 1 > env->tr.limit)
5947 {
5948 env = savedenv;
5949 return 0;
5950 }
5951 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
5952
5953 if (shift == 0) {
5954 *esp_ptr = lduw_kernel(env->tr.base + index);
5955 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
5956 } else {
5957 *esp_ptr = ldl_kernel(env->tr.base + index);
5958 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
5959 }
5960
5961 env = savedenv;
5962 return 1;
5963}
5964
5965//*****************************************************************************
5966// Needs to be at the bottom of the file (overriding macros)
5967
5968#ifndef VBOX
5969static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
5970#else /* VBOX */
5971DECLINLINE(CPU86_LDouble) helper_fldt_raw(uint8_t *ptr)
5972#endif /* VBOX */
5973{
5974 return *(CPU86_LDouble *)ptr;
5975}
5976
5977#ifndef VBOX
5978static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5979#else /* VBOX */
5980DECLINLINE(void) helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
5981#endif /* VBOX */
5982{
5983 *(CPU86_LDouble *)ptr = f;
5984}
5985
5986#undef stw
5987#undef stl
5988#undef stq
5989#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
5990#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
5991#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
5992
5993//*****************************************************************************
5994void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
5995{
5996 int fpus, fptag, i, nb_xmm_regs;
5997 CPU86_LDouble tmp;
5998 uint8_t *addr;
5999 int data64 = !!(env->hflags & HF_LMA_MASK);
6000
6001 if (env->cpuid_features & CPUID_FXSR)
6002 {
6003 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6004 fptag = 0;
6005 for(i = 0; i < 8; i++) {
6006 fptag |= (env->fptags[i] << i);
6007 }
6008 stw(ptr, env->fpuc);
6009 stw(ptr + 2, fpus);
6010 stw(ptr + 4, fptag ^ 0xff);
6011
6012 addr = ptr + 0x20;
6013 for(i = 0;i < 8; i++) {
6014 tmp = ST(i);
6015 helper_fstt_raw(tmp, addr);
6016 addr += 16;
6017 }
6018
6019 if (env->cr[4] & CR4_OSFXSR_MASK) {
6020 /* XXX: finish it */
6021 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6022 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6023 nb_xmm_regs = 8 << data64;
6024 addr = ptr + 0xa0;
6025 for(i = 0; i < nb_xmm_regs; i++) {
6026#if __GNUC__ < 4
6027 stq(addr, env->xmm_regs[i].XMM_Q(0));
6028 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6029#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6030 stl(addr, env->xmm_regs[i].XMM_L(0));
6031 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6032 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6033 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6034#endif
6035 addr += 16;
6036 }
6037 }
6038 }
6039 else
6040 {
6041 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6042 int fptag;
6043
6044 fp->FCW = env->fpuc;
6045 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6046 fptag = 0;
6047 for (i=7; i>=0; i--) {
6048 fptag <<= 2;
6049 if (env->fptags[i]) {
6050 fptag |= 3;
6051 } else {
6052 /* the FPU automatically computes it */
6053 }
6054 }
6055 fp->FTW = fptag;
6056
6057 for(i = 0;i < 8; i++) {
6058 tmp = ST(i);
6059 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
6060 }
6061 }
6062}
6063
6064//*****************************************************************************
6065#undef lduw
6066#undef ldl
6067#undef ldq
6068#define lduw(a) *(uint16_t *)(a)
6069#define ldl(a) *(uint32_t *)(a)
6070#define ldq(a) *(uint64_t *)(a)
6071//*****************************************************************************
6072void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6073{
6074 int i, fpus, fptag, nb_xmm_regs;
6075 CPU86_LDouble tmp;
6076 uint8_t *addr;
6077 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6078
6079 if (env->cpuid_features & CPUID_FXSR)
6080 {
6081 env->fpuc = lduw(ptr);
6082 fpus = lduw(ptr + 2);
6083 fptag = lduw(ptr + 4);
6084 env->fpstt = (fpus >> 11) & 7;
6085 env->fpus = fpus & ~0x3800;
6086 fptag ^= 0xff;
6087 for(i = 0;i < 8; i++) {
6088 env->fptags[i] = ((fptag >> i) & 1);
6089 }
6090
6091 addr = ptr + 0x20;
6092 for(i = 0;i < 8; i++) {
6093 tmp = helper_fldt_raw(addr);
6094 ST(i) = tmp;
6095 addr += 16;
6096 }
6097
6098 if (env->cr[4] & CR4_OSFXSR_MASK) {
6099 /* XXX: finish it, endianness */
6100 env->mxcsr = ldl(ptr + 0x18);
6101 //ldl(ptr + 0x1c);
6102 nb_xmm_regs = 8 << data64;
6103 addr = ptr + 0xa0;
6104 for(i = 0; i < nb_xmm_regs; i++) {
6105#if HC_ARCH_BITS == 32
6106 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6107 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6108 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6109 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6110 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6111#else
6112 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6113 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6114#endif
6115 addr += 16;
6116 }
6117 }
6118 }
6119 else
6120 {
6121 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6122 int fptag, j;
6123
6124 env->fpuc = fp->FCW;
6125 env->fpstt = (fp->FSW >> 11) & 7;
6126 env->fpus = fp->FSW & ~0x3800;
6127 fptag = fp->FTW;
6128 for(i = 0;i < 8; i++) {
6129 env->fptags[i] = ((fptag & 3) == 3);
6130 fptag >>= 2;
6131 }
6132 j = env->fpstt;
6133 for(i = 0;i < 8; i++) {
6134 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
6135 ST(i) = tmp;
6136 }
6137 }
6138}
6139//*****************************************************************************
6140//*****************************************************************************
6141
6142#endif /* VBOX */
6143
6144/* Secure Virtual Machine helpers */
6145
6146#if defined(CONFIG_USER_ONLY)
6147
6148void helper_vmrun(int aflag, int next_eip_addend)
6149{
6150}
6151void helper_vmmcall(void)
6152{
6153}
6154void helper_vmload(int aflag)
6155{
6156}
6157void helper_vmsave(int aflag)
6158{
6159}
6160void helper_stgi(void)
6161{
6162}
6163void helper_clgi(void)
6164{
6165}
6166void helper_skinit(void)
6167{
6168}
6169void helper_invlpga(int aflag)
6170{
6171}
6172void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6173{
6174}
6175void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6176{
6177}
6178
6179void helper_svm_check_io(uint32_t port, uint32_t param,
6180 uint32_t next_eip_addend)
6181{
6182}
6183#else
6184
6185#ifndef VBOX
6186static inline void svm_save_seg(target_phys_addr_t addr,
6187#else /* VBOX */
6188DECLINLINE(void) svm_save_seg(target_phys_addr_t addr,
6189#endif /* VBOX */
6190 const SegmentCache *sc)
6191{
6192 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6193 sc->selector);
6194 stq_phys(addr + offsetof(struct vmcb_seg, base),
6195 sc->base);
6196 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6197 sc->limit);
6198 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6199 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6200}
6201
6202#ifndef VBOX
6203static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6204#else /* VBOX */
6205DECLINLINE(void) svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6206#endif /* VBOX */
6207{
6208 unsigned int flags;
6209
6210 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6211 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6212 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6213 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6214 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6215}
6216
6217#ifndef VBOX
6218static inline void svm_load_seg_cache(target_phys_addr_t addr,
6219#else /* VBOX */
6220DECLINLINE(void) svm_load_seg_cache(target_phys_addr_t addr,
6221#endif /* VBOX */
6222 CPUState *env, int seg_reg)
6223{
6224 SegmentCache sc1, *sc = &sc1;
6225 svm_load_seg(addr, sc);
6226 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6227 sc->base, sc->limit, sc->flags);
6228}
6229
6230void helper_vmrun(int aflag, int next_eip_addend)
6231{
6232 target_ulong addr;
6233 uint32_t event_inj;
6234 uint32_t int_ctl;
6235
6236 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6237
6238 if (aflag == 2)
6239 addr = EAX;
6240 else
6241 addr = (uint32_t)EAX;
6242
6243 if (loglevel & CPU_LOG_TB_IN_ASM)
6244 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
6245
6246 env->vm_vmcb = addr;
6247
6248 /* save the current CPU state in the hsave page */
6249 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6250 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6251
6252 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6253 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6254
6255 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6256 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6257 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6258 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6259 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6260 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6261
6262 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6263 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6264
6265 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6266 &env->segs[R_ES]);
6267 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6268 &env->segs[R_CS]);
6269 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6270 &env->segs[R_SS]);
6271 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6272 &env->segs[R_DS]);
6273
6274 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6275 EIP + next_eip_addend);
6276 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6277 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6278
6279 /* load the interception bitmaps so we do not need to access the
6280 vmcb in svm mode */
6281 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6282 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6283 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6284 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6285 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6286 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6287
6288 /* enable intercepts */
6289 env->hflags |= HF_SVMI_MASK;
6290
6291 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6292
6293 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6294 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6295
6296 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6297 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6298
6299 /* clear exit_info_2 so we behave like the real hardware */
6300 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6301
6302 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6303 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6304 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6305 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6306 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6307 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6308 if (int_ctl & V_INTR_MASKING_MASK) {
6309 env->v_tpr = int_ctl & V_TPR_MASK;
6310 env->hflags2 |= HF2_VINTR_MASK;
6311 if (env->eflags & IF_MASK)
6312 env->hflags2 |= HF2_HIF_MASK;
6313 }
6314
6315 cpu_load_efer(env,
6316 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6317 env->eflags = 0;
6318 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6319 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6320 CC_OP = CC_OP_EFLAGS;
6321
6322 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6323 env, R_ES);
6324 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6325 env, R_CS);
6326 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6327 env, R_SS);
6328 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6329 env, R_DS);
6330
6331 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6332 env->eip = EIP;
6333 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6334 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6335 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6336 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6337 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6338
6339 /* FIXME: guest state consistency checks */
6340
6341 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6342 case TLB_CONTROL_DO_NOTHING:
6343 break;
6344 case TLB_CONTROL_FLUSH_ALL_ASID:
6345 /* FIXME: this is not 100% correct but should work for now */
6346 tlb_flush(env, 1);
6347 break;
6348 }
6349
6350 env->hflags2 |= HF2_GIF_MASK;
6351
6352 if (int_ctl & V_IRQ_MASK) {
6353 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6354 }
6355
6356 /* maybe we need to inject an event */
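    /* EVENTINJ layout (AMD SVM): bits 7:0 vector, bits 10:8 type, bit 11
       error code valid, bit 31 valid; the error code itself comes from
       event_inj_err.  The valid bit is cleared in the VMCB below so the
       event is only delivered once. */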
6357 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6358 if (event_inj & SVM_EVTINJ_VALID) {
6359 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6360 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6361 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6362 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
6363
6364 if (loglevel & CPU_LOG_TB_IN_ASM)
6365 fprintf(logfile, "Injecting(%#hx): ", valid_err);
6366 /* FIXME: need to implement valid_err */
6367 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6368 case SVM_EVTINJ_TYPE_INTR:
6369 env->exception_index = vector;
6370 env->error_code = event_inj_err;
6371 env->exception_is_int = 0;
6372 env->exception_next_eip = -1;
6373 if (loglevel & CPU_LOG_TB_IN_ASM)
6374 fprintf(logfile, "INTR");
6375 /* XXX: is it always correct ? */
6376 do_interrupt(vector, 0, 0, 0, 1);
6377 break;
6378 case SVM_EVTINJ_TYPE_NMI:
6379 env->exception_index = EXCP02_NMI;
6380 env->error_code = event_inj_err;
6381 env->exception_is_int = 0;
6382 env->exception_next_eip = EIP;
6383 if (loglevel & CPU_LOG_TB_IN_ASM)
6384 fprintf(logfile, "NMI");
6385 cpu_loop_exit();
6386 break;
6387 case SVM_EVTINJ_TYPE_EXEPT:
6388 env->exception_index = vector;
6389 env->error_code = event_inj_err;
6390 env->exception_is_int = 0;
6391 env->exception_next_eip = -1;
6392 if (loglevel & CPU_LOG_TB_IN_ASM)
6393 fprintf(logfile, "EXEPT");
6394 cpu_loop_exit();
6395 break;
6396 case SVM_EVTINJ_TYPE_SOFT:
6397 env->exception_index = vector;
6398 env->error_code = event_inj_err;
6399 env->exception_is_int = 1;
6400 env->exception_next_eip = EIP;
6401 if (loglevel & CPU_LOG_TB_IN_ASM)
6402 fprintf(logfile, "SOFT");
6403 cpu_loop_exit();
6404 break;
6405 }
6406 if (loglevel & CPU_LOG_TB_IN_ASM)
6407 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
6408 }
6409}
6410
6411void helper_vmmcall(void)
6412{
6413 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6414 raise_exception(EXCP06_ILLOP);
6415}
6416
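/* VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT do not touch:
   FS, GS, TR and LDTR (including hidden descriptor state), KernelGSBase,
   the STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER MSRs. */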
6417void helper_vmload(int aflag)
6418{
6419 target_ulong addr;
6420 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6421
6422 if (aflag == 2)
6423 addr = EAX;
6424 else
6425 addr = (uint32_t)EAX;
6426
6427 if (loglevel & CPU_LOG_TB_IN_ASM)
6428 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6429 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6430 env->segs[R_FS].base);
6431
6432 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6433 env, R_FS);
6434 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6435 env, R_GS);
6436 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6437 &env->tr);
6438 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6439 &env->ldt);
6440
6441#ifdef TARGET_X86_64
6442 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6443 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6444 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6445 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6446#endif
6447 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6448 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6449 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6450 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6451}
6452
6453void helper_vmsave(int aflag)
6454{
6455 target_ulong addr;
6456 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6457
6458 if (aflag == 2)
6459 addr = EAX;
6460 else
6461 addr = (uint32_t)EAX;
6462
6463 if (loglevel & CPU_LOG_TB_IN_ASM)
6464 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6465 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6466 env->segs[R_FS].base);
6467
6468 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6469 &env->segs[R_FS]);
6470 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6471 &env->segs[R_GS]);
6472 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6473 &env->tr);
6474 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6475 &env->ldt);
6476
6477#ifdef TARGET_X86_64
6478 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6479 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6480 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6481 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6482#endif
6483 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6484 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6485 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6486 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6487}
6488
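/* STGI/CLGI toggle the global interrupt flag (HF2_GIF_MASK), which gates
   interrupt delivery to the guest in the execution loop while SVM state
   is being switched. */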
6489void helper_stgi(void)
6490{
6491 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6492 env->hflags2 |= HF2_GIF_MASK;
6493}
6494
6495void helper_clgi(void)
6496{
6497 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6498 env->hflags2 &= ~HF2_GIF_MASK;
6499}
6500
6501void helper_skinit(void)
6502{
6503 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6504 /* XXX: not implemented */
6505 raise_exception(EXCP06_ILLOP);
6506}
6507
6508void helper_invlpga(int aflag)
6509{
6510 target_ulong addr;
6511 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6512
6513 if (aflag == 2)
6514 addr = EAX;
6515 else
6516 addr = (uint32_t)EAX;
6517
6518    /* XXX: could use the ASID to decide whether the flush is
6519       actually needed */

6520 tlb_flush_page(env, addr);
6521}
6522
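/* Called before potentially intercepted operations; consults the intercept
   bitmaps cached at VMRUN time and raises a #VMEXIT via helper_vmexit()
   when the corresponding bit is set.  In the VBOX build this dispatch is
   compiled out (see the #else branch below): hardware-assisted SVM is
   handled by HWACCM, not by the recompiler. */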
6523void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6524{
6525 if (likely(!(env->hflags & HF_SVMI_MASK)))
6526 return;
6527#ifndef VBOX
6528 switch(type) {
6529#ifndef VBOX
6530 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6531#else
6532 case SVM_EXIT_READ_CR0: case SVM_EXIT_READ_CR0 + 1: case SVM_EXIT_READ_CR0 + 2:
6533 case SVM_EXIT_READ_CR0 + 3: case SVM_EXIT_READ_CR0 + 4: case SVM_EXIT_READ_CR0 + 5:
6534 case SVM_EXIT_READ_CR0 + 6: case SVM_EXIT_READ_CR0 + 7: case SVM_EXIT_READ_CR0 + 8:
6535#endif
6536 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6537 helper_vmexit(type, param);
6538 }
6539 break;
6540#ifndef VBOX
6541 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6542#else
6543 case SVM_EXIT_WRITE_CR0: case SVM_EXIT_WRITE_CR0 + 1: case SVM_EXIT_WRITE_CR0 + 2:
6544 case SVM_EXIT_WRITE_CR0 + 3: case SVM_EXIT_WRITE_CR0 + 4: case SVM_EXIT_WRITE_CR0 + 5:
6545 case SVM_EXIT_WRITE_CR0 + 6: case SVM_EXIT_WRITE_CR0 + 7: case SVM_EXIT_WRITE_CR0 + 8:
6546#endif
6547 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6548 helper_vmexit(type, param);
6549 }
6550 break;
6551 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6552 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6553 helper_vmexit(type, param);
6554 }
6555 break;
6556 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6557 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6558 helper_vmexit(type, param);
6559 }
6560 break;
6561 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6562 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6563 helper_vmexit(type, param);
6564 }
6565 break;
6566 case SVM_EXIT_MSR:
6567 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6568 /* FIXME: this should be read in at vmrun (faster this way?) */
6569 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6570 uint32_t t0, t1;
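            /* Each MSR takes two bits in the permission bitmap (read
               intercept, then write intercept).  t1 is the byte offset into
               the MSRPM and t0 the bit offset of the read bit within that
               byte; param is expected to be 0 for RDMSR and 1 for WRMSR, so
               (1 << param) << t0 selects the right bit below. */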
6571 switch((uint32_t)ECX) {
6572 case 0 ... 0x1fff:
6573 t0 = (ECX * 2) % 8;
6574             t1 = (ECX * 2) / 8;
6575 break;
6576 case 0xc0000000 ... 0xc0001fff:
6577 t0 = (8192 + ECX - 0xc0000000) * 2;
6578 t1 = (t0 / 8);
6579 t0 %= 8;
6580 break;
6581 case 0xc0010000 ... 0xc0011fff:
6582 t0 = (16384 + ECX - 0xc0010000) * 2;
6583 t1 = (t0 / 8);
6584 t0 %= 8;
6585 break;
6586 default:
6587 helper_vmexit(type, param);
6588 t0 = 0;
6589 t1 = 0;
6590 break;
6591 }
6592 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6593 helper_vmexit(type, param);
6594 }
6595 break;
6596 default:
6597 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6598 helper_vmexit(type, param);
6599 }
6600 break;
6601 }
6602#else
6603 AssertMsgFailed(("We shouldn't be here, HWACCM supported differently!"));
6604#endif
6605}
6606
6607void helper_svm_check_io(uint32_t port, uint32_t param,
6608 uint32_t next_eip_addend)
6609{
6610 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6611 /* FIXME: this should be read in at vmrun (faster this way?) */
6612 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
6613 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
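        /* The IOPM has one bit per I/O port.  Bits 6:4 of param encode the
           access size in bytes, so mask covers every port touched by the
           access; the 16-bit load below spans the byte boundary that a
           multi-byte access may cross. */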
6614 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6615 /* next EIP */
6616 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6617 env->eip + next_eip_addend);
6618 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6619 }
6620 }
6621}
6622
6623/* Note: currently only 32 bits of exit_code are used */
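/* #VMEXIT: write the guest state and exit code back into the VMCB, restore
   the host state that helper_vmrun stashed in the hsave area, and leave the
   CPU loop so execution resumes at the host rip saved at VMRUN time. */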
6624void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6625{
6626 uint32_t int_ctl;
6627
6628 if (loglevel & CPU_LOG_TB_IN_ASM)
6629 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6630 exit_code, exit_info_1,
6631 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6632 EIP);
6633
6634 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6635 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6636 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6637 } else {
6638 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6639 }
6640
6641 /* Save the VM state in the vmcb */
6642 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6643 &env->segs[R_ES]);
6644 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6645 &env->segs[R_CS]);
6646 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6647 &env->segs[R_SS]);
6648 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6649 &env->segs[R_DS]);
6650
6651 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6652 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6653
6654 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6655 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6656
6657 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6658 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6659 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6660 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6661 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6662
6663 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6664 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6665 int_ctl |= env->v_tpr & V_TPR_MASK;
6666 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6667 int_ctl |= V_IRQ_MASK;
6668 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6669
6670 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6671 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6672 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6673 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6674 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6675 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6676 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6677
6678 /* Reload the host state from vm_hsave */
6679 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6680 env->hflags &= ~HF_SVMI_MASK;
6681 env->intercept = 0;
6682 env->intercept_exceptions = 0;
6683 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6684 env->tsc_offset = 0;
6685
6686 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6687 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6688
6689 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6690 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6691
6692 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6693 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6694 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6695 /* we need to set the efer after the crs so the hidden flags get
6696 set properly */
6697 cpu_load_efer(env,
6698 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6699 env->eflags = 0;
6700 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6701 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6702 CC_OP = CC_OP_EFLAGS;
6703
6704 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6705 env, R_ES);
6706 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6707 env, R_CS);
6708 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6709 env, R_SS);
6710 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6711 env, R_DS);
6712
6713 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6714 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6715 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6716
6717 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6718 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6719
6720 /* other setups */
6721 cpu_x86_set_cpl(env, 0);
6722 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6723 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6724
6725 env->hflags2 &= ~HF2_GIF_MASK;
6726 /* FIXME: Resets the current ASID register to zero (host ASID). */
6727
6728 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6729
6730 /* Clears the TSC_OFFSET inside the processor. */
6731
6732 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6733       from the page table indicated by the host's CR3. If the PDPEs contain
6734 illegal state, the processor causes a shutdown. */
6735
6736 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6737 env->cr[0] |= CR0_PE_MASK;
6738 env->eflags &= ~VM_MASK;
6739
6740 /* Disables all breakpoints in the host DR7 register. */
6741
6742 /* Checks the reloaded host state for consistency. */
6743
6744 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6745 host's code segment or non-canonical (in the case of long mode), a
6746       #GP fault is delivered inside the host. */
6747
6748 /* remove any pending exception */
6749 env->exception_index = -1;
6750 env->error_code = 0;
6751 env->old_exception = -1;
6752
6753 cpu_loop_exit();
6754}
6755
6756#endif
6757
6758/* MMX/SSE */
6759/* XXX: optimize by storing fptt and fptags in the static cpu state */
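/* Entering MMX mode resets the x87 stack top and marks all eight tag bytes
   valid (0); EMMS marks them empty (1).  The tags are stored as one byte
   each, hence the two 32-bit stores. */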
6760void helper_enter_mmx(void)
6761{
6762 env->fpstt = 0;
6763 *(uint32_t *)(env->fptags) = 0;
6764 *(uint32_t *)(env->fptags + 4) = 0;
6765}
6766
6767void helper_emms(void)
6768{
6769 /* set to empty state */
6770 *(uint32_t *)(env->fptags) = 0x01010101;
6771 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6772}
6773
6774/* XXX: suppress */
6775void helper_movq(uint64_t *d, uint64_t *s)
6776{
6777 *d = *s;
6778}
6779
6780#define SHIFT 0
6781#include "ops_sse.h"
6782
6783#define SHIFT 1
6784#include "ops_sse.h"
6785
6786#define SHIFT 0
6787#include "helper_template.h"
6788#undef SHIFT
6789
6790#define SHIFT 1
6791#include "helper_template.h"
6792#undef SHIFT
6793
6794#define SHIFT 2
6795#include "helper_template.h"
6796#undef SHIFT
6797
6798#ifdef TARGET_X86_64
6799
6800#define SHIFT 3
6801#include "helper_template.h"
6802#undef SHIFT
6803
6804#endif
6805
6806/* bit operations */
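/* BSF/BSR return the index of the least/most significant set bit.  The
   loops assume a non-zero operand; the translator is expected to branch
   around the helper (and just set ZF) when the source is zero. */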
6807target_ulong helper_bsf(target_ulong t0)
6808{
6809 int count;
6810 target_ulong res;
6811
6812 res = t0;
6813 count = 0;
6814 while ((res & 1) == 0) {
6815 count++;
6816 res >>= 1;
6817 }
6818 return count;
6819}
6820
6821target_ulong helper_bsr(target_ulong t0)
6822{
6823 int count;
6824 target_ulong res, mask;
6825
6826 res = t0;
6827 count = TARGET_LONG_BITS - 1;
6828 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
6829 while ((res & mask) == 0) {
6830 count--;
6831 res <<= 1;
6832 }
6833 return count;
6834}
6835
6836
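/* Lazy flag evaluation: cc_table maps each CC_OP value to a pair of helpers,
   one computing the full EFLAGS condition bits and one computing only CF,
   from the CC_SRC/CC_DST operands recorded by the last flag-setting
   operation.  For CC_OP_EFLAGS the flags are already stored in CC_SRC. */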
6837static int compute_all_eflags(void)
6838{
6839 return CC_SRC;
6840}
6841
6842static int compute_c_eflags(void)
6843{
6844 return CC_SRC & CC_C;
6845}
6846
6847#ifndef VBOX
6848CCTable cc_table[CC_OP_NB] = {
6849 [CC_OP_DYNAMIC] = { /* should never happen */ },
6850
6851 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
6852
6853 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
6854 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
6855 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
6856
6857 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
6858 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
6859 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
6860
6861 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
6862 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
6863 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
6864
6865 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
6866 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
6867 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
6868
6869 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
6870 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
6871 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
6872
6873 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
6874 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
6875 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
6876
6877 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
6878 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
6879 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
6880
6881 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
6882 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
6883 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
6884
6885 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
6886 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
6887 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
6888
6889 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
6890 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
6891 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
6892
6893#ifdef TARGET_X86_64
6894 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
6895
6896 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
6897
6898 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
6899
6900 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
6901
6902 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
6903
6904 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
6905
6906 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
6907
6908 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
6909
6910 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
6911
6912 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
6913#endif
6914};
6915#else /* VBOX */
6916/* Sync carefully with cpu.h */
6917CCTable cc_table[CC_OP_NB] = {
6918 /* CC_OP_DYNAMIC */ { 0, 0 },
6919
6920 /* CC_OP_EFLAGS */ { compute_all_eflags, compute_c_eflags },
6921
6922 /* CC_OP_MULB */ { compute_all_mulb, compute_c_mull },
6923 /* CC_OP_MULW */ { compute_all_mulw, compute_c_mull },
6924 /* CC_OP_MULL */ { compute_all_mull, compute_c_mull },
6925#ifdef TARGET_X86_64
6926 /* CC_OP_MULQ */ { compute_all_mulq, compute_c_mull },
6927#else
6928 /* CC_OP_MULQ */ { 0, 0 },
6929#endif
6930
6931 /* CC_OP_ADDB */ { compute_all_addb, compute_c_addb },
6932 /* CC_OP_ADDW */ { compute_all_addw, compute_c_addw },
6933 /* CC_OP_ADDL */ { compute_all_addl, compute_c_addl },
6934#ifdef TARGET_X86_64
6935 /* CC_OP_ADDQ */ { compute_all_addq, compute_c_addq },
6936#else
6937 /* CC_OP_ADDQ */ { 0, 0 },
6938#endif
6939
6940 /* CC_OP_ADCB */ { compute_all_adcb, compute_c_adcb },
6941 /* CC_OP_ADCW */ { compute_all_adcw, compute_c_adcw },
6942 /* CC_OP_ADCL */ { compute_all_adcl, compute_c_adcl },
6943#ifdef TARGET_X86_64
6944 /* CC_OP_ADCQ */ { compute_all_adcq, compute_c_adcq },
6945#else
6946 /* CC_OP_ADCQ */ { 0, 0 },
6947#endif
6948
6949 /* CC_OP_SUBB */ { compute_all_subb, compute_c_subb },
6950 /* CC_OP_SUBW */ { compute_all_subw, compute_c_subw },
6951 /* CC_OP_SUBL */ { compute_all_subl, compute_c_subl },
6952#ifdef TARGET_X86_64
6953 /* CC_OP_SUBQ */ { compute_all_subq, compute_c_subq },
6954#else
6955 /* CC_OP_SUBQ */ { 0, 0 },
6956#endif
6957
6958 /* CC_OP_SBBB */ { compute_all_sbbb, compute_c_sbbb },
6959 /* CC_OP_SBBW */ { compute_all_sbbw, compute_c_sbbw },
6960 /* CC_OP_SBBL */ { compute_all_sbbl, compute_c_sbbl },
6961#ifdef TARGET_X86_64
6962 /* CC_OP_SBBQ */ { compute_all_sbbq, compute_c_sbbq },
6963#else
6964 /* CC_OP_SBBQ */ { 0, 0 },
6965#endif
6966
6967 /* CC_OP_LOGICB */ { compute_all_logicb, compute_c_logicb },
6968 /* CC_OP_LOGICW */ { compute_all_logicw, compute_c_logicw },
6969 /* CC_OP_LOGICL */ { compute_all_logicl, compute_c_logicl },
6970#ifdef TARGET_X86_64
6971 /* CC_OP_LOGICQ */ { compute_all_logicq, compute_c_logicq },
6972#else
6973 /* CC_OP_LOGICQ */ { 0, 0 },
6974#endif
6975
6976 /* CC_OP_INCB */ { compute_all_incb, compute_c_incl },
6977 /* CC_OP_INCW */ { compute_all_incw, compute_c_incl },
6978 /* CC_OP_INCL */ { compute_all_incl, compute_c_incl },
6979#ifdef TARGET_X86_64
6980 /* CC_OP_INCQ */ { compute_all_incq, compute_c_incl },
6981#else
6982 /* CC_OP_INCQ */ { 0, 0 },
6983#endif
6984
6985 /* CC_OP_DECB */ { compute_all_decb, compute_c_incl },
6986 /* CC_OP_DECW */ { compute_all_decw, compute_c_incl },
6987 /* CC_OP_DECL */ { compute_all_decl, compute_c_incl },
6988#ifdef TARGET_X86_64
6989 /* CC_OP_DECQ */ { compute_all_decq, compute_c_incl },
6990#else
6991 /* CC_OP_DECQ */ { 0, 0 },
6992#endif
6993
6994 /* CC_OP_SHLB */ { compute_all_shlb, compute_c_shlb },
6995 /* CC_OP_SHLW */ { compute_all_shlw, compute_c_shlw },
6996 /* CC_OP_SHLL */ { compute_all_shll, compute_c_shll },
6997#ifdef TARGET_X86_64
6998 /* CC_OP_SHLQ */ { compute_all_shlq, compute_c_shlq },
6999#else
7000 /* CC_OP_SHLQ */ { 0, 0 },
7001#endif
7002
7003 /* CC_OP_SARB */ { compute_all_sarb, compute_c_sarl },
7004 /* CC_OP_SARW */ { compute_all_sarw, compute_c_sarl },
7005 /* CC_OP_SARL */ { compute_all_sarl, compute_c_sarl },
7006#ifdef TARGET_X86_64
7007 /* CC_OP_SARQ */ { compute_all_sarq, compute_c_sarl},
7008#else
7009 /* CC_OP_SARQ */ { 0, 0 },
7010#endif
7011};
7012#endif /* VBOX */