VirtualBox

source: vbox/trunk/src/recompiler/target-i386/op_helper.c @ 47937

Last change on this file since 47937 was 47767, checked in by vboxsync, 11 years ago

REM: clear unnecessary TR attributes on task switch.

  • Property svn:eol-style set to native
File size: 201.6 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*
21 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
22 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
23 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
24 * a choice of LGPL license versions is made available with the language indicating
25 * that LGPLv2 or any later version may be used, or where a choice of which version
26 * of the LGPL is applied is otherwise unspecified.
27 */
28
29#include "exec.h"
30#include "exec-all.h"
31#include "host-utils.h"
32#include "ioport.h"
33
34#ifdef VBOX
35# include "qemu-common.h"
36# include <math.h>
37# include "tcg.h"
38#endif /* VBOX */
39
40//#define DEBUG_PCALL
41
42
43#ifdef DEBUG_PCALL
44# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
45# define LOG_PCALL_STATE(env) \
46 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
47#else
48# define LOG_PCALL(...) do { } while (0)
49# define LOG_PCALL_STATE(env) do { } while (0)
50#endif
51
52
53#if 0
54#define raise_exception_err(a, b)\
55do {\
56 qemu_log("raise_exception line=%d\n", __LINE__);\
57 (raise_exception_err)(a, b);\
58} while (0)
59#endif
60
61static const uint8_t parity_table[256] = {
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
83 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
84 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
86 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
92 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
94};
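/*
 * Illustrative sketch (not from the original sources): the table above is a
 * precomputed PF lookup.  PF is set when the low 8 bits of a result contain
 * an even number of 1 bits, so an equivalent on-the-fly computation would be:
 */
#if 0
static int compute_pf(uint8_t result_low8)      /* hypothetical helper */
{
    int i, bits = 0;
    for (i = 0; i < 8; i++)
        bits += (result_low8 >> i) & 1;
    return (bits & 1) == 0 ? CC_P : 0;          /* == parity_table[result_low8] */
}
#endif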
95
96/* modulo 17 table */
97static const uint8_t rclw_table[32] = {
98 0, 1, 2, 3, 4, 5, 6, 7,
99 8, 9,10,11,12,13,14,15,
100 16, 0, 1, 2, 3, 4, 5, 6,
101 7, 8, 9,10,11,12,13,14,
102};
103
104/* modulo 9 table */
105static const uint8_t rclb_table[32] = {
106 0, 1, 2, 3, 4, 5, 6, 7,
107 8, 0, 1, 2, 3, 4, 5, 6,
108 7, 8, 0, 1, 2, 3, 4, 5,
109 6, 7, 8, 0, 1, 2, 3, 4,
110};
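/*
 * Illustrative note (sketch, not from the original sources): RCL/RCR on an
 * 8- or 16-bit operand rotate through CF, i.e. over 9 or 17 bit positions,
 * so the effective count is the shift count modulo 9 or 17.  The two tables
 * above simply precompute that for counts 0..31:
 */
#if 0
static void rcl_tables_self_check(void)         /* hypothetical helper, needs <stdio.h> */
{
    int n;
    for (n = 0; n < 32; n++)
        if (rclb_table[n] != n % 9 || rclw_table[n] != n % 17)
            printf("table mismatch at count %d\n", n);
}
#endif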
111
112static const CPU86_LDouble f15rk[7] =
113{
114 0.00000000000000000000L,
115 1.00000000000000000000L,
116 3.14159265358979323851L, /*pi*/
117 0.30102999566398119523L, /*lg2*/
118 0.69314718055994530943L, /*ln2*/
119 1.44269504088896340739L, /*l2e*/
120 3.32192809488736234781L, /*l2t*/
121};
122
123/* broken thread support */
124
125static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
126
127void helper_lock(void)
128{
129 spin_lock(&global_cpu_lock);
130}
131
132void helper_unlock(void)
133{
134 spin_unlock(&global_cpu_lock);
135}
136
137void helper_write_eflags(target_ulong t0, uint32_t update_mask)
138{
139 load_eflags(t0, update_mask);
140}
141
142target_ulong helper_read_eflags(void)
143{
144 uint32_t eflags;
145 eflags = helper_cc_compute_all(CC_OP);
146 eflags |= (DF & DF_MASK);
147 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
148 return eflags;
149}
150
151#ifdef VBOX
152
153void helper_write_eflags_vme(target_ulong t0)
154{
155 unsigned int new_eflags = t0;
156
157 assert(env->eflags & (1<<VM_SHIFT));
158
159 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
160 /* if TF will be set -> #GP */
161 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
162 || (new_eflags & TF_MASK)) {
163 raise_exception(EXCP0D_GPF);
164 } else {
165 load_eflags(new_eflags,
166 (TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff);
167
168 if (new_eflags & IF_MASK) {
169 env->eflags |= VIF_MASK;
170 } else {
171 env->eflags &= ~VIF_MASK;
172 }
173 }
174}
175
176target_ulong helper_read_eflags_vme(void)
177{
178 uint32_t eflags;
179 eflags = helper_cc_compute_all(CC_OP);
180 eflags |= (DF & DF_MASK);
181 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
182 if (env->eflags & VIF_MASK)
183 eflags |= IF_MASK;
184 else
185 eflags &= ~IF_MASK;
186
187 /* According to AMD manual, should be read with IOPL == 3 */
188 eflags |= (3 << IOPL_SHIFT);
189
190 /* We only use helper_read_eflags_vme() in 16-bit mode */
191 return eflags & 0xffff;
192}
193
194void helper_dump_state()
195{
196 LogRel(("CS:EIP=%08x:%08x, FLAGS=%08x\n", env->segs[R_CS].base, env->eip, env->eflags));
197 LogRel(("EAX=%08x\tECX=%08x\tEDX=%08x\tEBX=%08x\n",
198 (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
199 (uint32_t)env->regs[R_EDX], (uint32_t)env->regs[R_EBX]));
200 LogRel(("ESP=%08x\tEBP=%08x\tESI=%08x\tEDI=%08x\n",
201 (uint32_t)env->regs[R_ESP], (uint32_t)env->regs[R_EBP],
202 (uint32_t)env->regs[R_ESI], (uint32_t)env->regs[R_EDI]));
203}
204
205/**
206 * Updates e2 with the DESC_A_MASK, writes it to the descriptor table, and
207 * returns the updated e2.
208 *
209 * @returns e2 with A set.
210 * @param e2 The 2nd selector DWORD.
211 */
212static uint32_t set_segment_accessed(int selector, uint32_t e2)
213{
214 SegmentCache *dt = selector & X86_SEL_LDT ? &env->ldt : &env->gdt;
215 target_ulong ptr = dt->base + (selector & X86_SEL_MASK);
216
217 e2 |= DESC_A_MASK;
218 stl_kernel(ptr + 4, e2);
219 return e2;
220}
221
222#endif /* VBOX */
223
224/* return non-zero on error */
225static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
226 int selector)
227{
228 SegmentCache *dt;
229 int index;
230 target_ulong ptr;
231
232#ifdef VBOX
233 /* Trying to load a selector with CPL=1? */
234 /** @todo this is a hack to correct the incorrect checking order for pending interrupts in the patm iret replacement code (corrected in the ring-1 version) */
235 /** @todo in theory the iret could fault and we'd still need this. */
236 if ((env->hflags & HF_CPL_MASK) == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0) && !EMIsRawRing1Enabled(env->pVM))
237 {
238 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
239 selector = selector & 0xfffc;
240 }
241#endif /* VBOX */
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
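/*
 * Worked example (illustration, not from the original sources): for a flat
 * 4GiB read/write data descriptor the two words are e1 = 0x0000ffff and
 * e2 = 0x00cf9300, so get_seg_base() returns 0x00000000 and get_seg_limit()
 * returns ((0xffff | 0x000f0000) << 12) | 0xfff = 0xffffffff (G bit set).
 */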
269
270static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271{
272 sc->base = get_seg_base(e1, e2);
273 sc->limit = get_seg_limit(e1, e2);
274#ifndef VBOX
275 sc->flags = e2;
276#else
277 sc->flags = e2 & DESC_RAW_FLAG_BITS;
278 sc->newselector = 0;
279 sc->fVBoxFlags = CPUMSELREG_FLAGS_VALID;
280#endif
281}
282
283/* init the segment cache in vm86 mode. */
284static inline void load_seg_vm(int seg, int selector)
285{
286 selector &= 0xffff;
287#ifdef VBOX
288 /* flags must be 0xf3; expand-up read/write accessed data segment with DPL=3. (VT-x) */
289 unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | DESC_A_MASK;
290 flags |= (3 << DESC_DPL_SHIFT);
291
292 cpu_x86_load_seg_cache(env, seg, selector,
293 (selector << 4), 0xffff, flags);
294#else /* VBOX */
295 cpu_x86_load_seg_cache(env, seg, selector,
296 (selector << 4), 0xffff, 0);
297#endif /* VBOX */
298}
299
300static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
301 uint32_t *esp_ptr, int dpl)
302{
303#ifndef VBOX
304 int type, index, shift;
305#else
306 unsigned int type, index, shift;
307#endif
308
309#if 0
310 {
311 int i;
312 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
313 for(i=0;i<env->tr.limit;i++) {
314 printf("%02x ", env->tr.base[i]);
315 if ((i & 7) == 7) printf("\n");
316 }
317 printf("\n");
318 }
319#endif
320
321 if (!(env->tr.flags & DESC_P_MASK))
322 cpu_abort(env, "invalid tss");
323 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
324 if ((type & 7) != 1)
325 cpu_abort(env, "invalid tss type");
326 shift = type >> 3;
327 index = (dpl * 4 + 2) << shift;
328 if (index + (4 << shift) - 1 > env->tr.limit)
329 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
330 if (shift == 0) {
331 *esp_ptr = lduw_kernel(env->tr.base + index);
332 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
333 } else {
334 *esp_ptr = ldl_kernel(env->tr.base + index);
335 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
336 }
337}
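/*
 * Worked example (illustration, not from the original sources): for a 32-bit
 * TSS (shift == 1) and dpl == 0, index = (0 * 4 + 2) << 1 = 4, so ESP0 is
 * read from env->tr.base + 4 and SS0 from env->tr.base + 8, matching the
 * hardware TSS layout; a 16-bit TSS (shift == 0) uses offsets 2 and 4.
 */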
338
339/* XXX: merge with load_seg() */
340static void tss_load_seg(int seg_reg, int selector)
341{
342 uint32_t e1, e2;
343 int rpl, dpl, cpl;
344
345#ifdef VBOX
346 e1 = e2 = 0; /* gcc warning? */
347 cpl = env->hflags & HF_CPL_MASK;
348 /* Trying to load a selector with CPL=1? */
349 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
350 {
351 Log(("RPL 1 -> sel %04X -> %04X (tss_load_seg)\n", selector, selector & 0xfffc));
352 selector = selector & 0xfffc;
353 }
354#endif /* VBOX */
355
356 if ((selector & 0xfffc) != 0) {
357 if (load_segment(&e1, &e2, selector) != 0)
358 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
359 if (!(e2 & DESC_S_MASK))
360 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
361 rpl = selector & 3;
362 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
363 cpl = env->hflags & HF_CPL_MASK;
364 if (seg_reg == R_CS) {
365 if (!(e2 & DESC_CS_MASK))
366 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
367 /* XXX: is it correct ? */
368 if (dpl != rpl)
369 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
370 if ((e2 & DESC_C_MASK) && dpl > rpl)
371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
372 } else if (seg_reg == R_SS) {
373 /* SS must be writable data */
374 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
375 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
376 if (dpl != cpl || dpl != rpl)
377 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
378 } else {
379 /* not readable code */
380 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
381 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
382 /* if data or non-conforming code, check the rights */
383 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
384 if (dpl < cpl || dpl < rpl)
385 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
386 }
387 }
388 if (!(e2 & DESC_P_MASK))
389 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
390 cpu_x86_load_seg_cache(env, seg_reg, selector,
391 get_seg_base(e1, e2),
392 get_seg_limit(e1, e2),
393 e2);
394 } else {
395 if (seg_reg == R_SS || seg_reg == R_CS)
396 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
397#ifdef VBOX
398# if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */
399 cpu_x86_load_seg_cache(env, seg_reg, selector,
400 0, 0, 0);
401# endif
402#endif /* VBOX */
403 }
404}
405
406#define SWITCH_TSS_JMP 0
407#define SWITCH_TSS_IRET 1
408#define SWITCH_TSS_CALL 2
409
410/* XXX: restore CPU state in registers (PowerPC case) */
411static void switch_tss(int tss_selector,
412 uint32_t e1, uint32_t e2, int source,
413 uint32_t next_eip)
414{
415 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
416 target_ulong tss_base;
417 uint32_t new_regs[8], new_segs[6];
418 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
419 uint32_t old_eflags, eflags_mask;
420 SegmentCache *dt;
421#ifndef VBOX
422 int index;
423#else
424 unsigned int index;
425#endif
426 target_ulong ptr;
427
428 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
429 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
430
431 /* if task gate, we read the TSS segment and we load it */
432 if (type == 5) {
433 if (!(e2 & DESC_P_MASK))
434 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
435 tss_selector = e1 >> 16;
436 if (tss_selector & 4)
437 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
438 if (load_segment(&e1, &e2, tss_selector) != 0)
439 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
440 if (e2 & DESC_S_MASK)
441 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
442 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
443 if ((type & 7) != 1)
444 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
445 }
446
447 if (!(e2 & DESC_P_MASK))
448 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
449
450 if (type & 8)
451 tss_limit_max = 103;
452 else
453 tss_limit_max = 43;
454 tss_limit = get_seg_limit(e1, e2);
455 tss_base = get_seg_base(e1, e2);
456 if ((tss_selector & 4) != 0 ||
457 tss_limit < tss_limit_max)
458 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
459 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
460 if (old_type & 8)
461 old_tss_limit_max = 103;
462 else
463 old_tss_limit_max = 43;
464
465#ifndef VBOX /* The old TSS is written first... */
466 /* read all the registers from the new TSS */
467 if (type & 8) {
468 /* 32 bit */
469 new_cr3 = ldl_kernel(tss_base + 0x1c);
470 new_eip = ldl_kernel(tss_base + 0x20);
471 new_eflags = ldl_kernel(tss_base + 0x24);
472 for(i = 0; i < 8; i++)
473 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
474 for(i = 0; i < 6; i++)
475 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
476 new_ldt = lduw_kernel(tss_base + 0x60);
477 new_trap = ldl_kernel(tss_base + 0x64);
478 } else {
479 /* 16 bit */
480 new_cr3 = 0;
481 new_eip = lduw_kernel(tss_base + 0x0e);
482 new_eflags = lduw_kernel(tss_base + 0x10);
483 for(i = 0; i < 8; i++)
484 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
485 for(i = 0; i < 4; i++)
486 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
487 new_ldt = lduw_kernel(tss_base + 0x2a);
488 new_segs[R_FS] = 0;
489 new_segs[R_GS] = 0;
490 new_trap = 0;
491 }
492#endif
493
494 /* NOTE: we must avoid memory exceptions during the task switch,
495 so we make dummy accesses beforehand */
496 /* XXX: it can still fail in some cases, so a bigger hack is
497 necessary to validate the TLB after having done the accesses */
498
499 v1 = ldub_kernel(env->tr.base);
500 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
501 stb_kernel(env->tr.base, v1);
502 stb_kernel(env->tr.base + old_tss_limit_max, v2);
503
504 /* clear busy bit (it is restartable) */
505 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
506 target_ulong ptr;
507 uint32_t e2;
508 ptr = env->gdt.base + (env->tr.selector & ~7);
509 e2 = ldl_kernel(ptr + 4);
510 e2 &= ~DESC_TSS_BUSY_MASK;
511 stl_kernel(ptr + 4, e2);
512 }
513 old_eflags = compute_eflags();
514 if (source == SWITCH_TSS_IRET)
515 old_eflags &= ~NT_MASK;
516
517 /* save the current state in the old TSS */
518 if (type & 8) {
519 /* 32 bit */
520 stl_kernel(env->tr.base + 0x20, next_eip);
521 stl_kernel(env->tr.base + 0x24, old_eflags);
522 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
523 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
524 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
525 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
526 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
527 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
528 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
529 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
530 for(i = 0; i < 6; i++)
531 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
532#if defined(VBOX) && defined(DEBUG)
533 printf("TSS 32 bits switch\n");
534 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
535#endif
536 } else {
537 /* 16 bit */
538 stw_kernel(env->tr.base + 0x0e, next_eip);
539 stw_kernel(env->tr.base + 0x10, old_eflags);
540 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
541 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
542 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
543 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
544 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
545 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
546 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
547 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
548 for(i = 0; i < 4; i++)
549 stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
550 }
551
552#ifdef VBOX
553 /* read all the registers from the new TSS - may be the same as the old one */
554 if (type & 8) {
555 /* 32 bit */
556 new_cr3 = ldl_kernel(tss_base + 0x1c);
557 new_eip = ldl_kernel(tss_base + 0x20);
558 new_eflags = ldl_kernel(tss_base + 0x24);
559 for(i = 0; i < 8; i++)
560 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
561 for(i = 0; i < 6; i++)
562 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
563 new_ldt = lduw_kernel(tss_base + 0x60);
564 new_trap = ldl_kernel(tss_base + 0x64);
565 } else {
566 /* 16 bit */
567 new_cr3 = 0;
568 new_eip = lduw_kernel(tss_base + 0x0e);
569 new_eflags = lduw_kernel(tss_base + 0x10);
570 for(i = 0; i < 8; i++)
571 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
572 for(i = 0; i < 4; i++)
573 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
574 new_ldt = lduw_kernel(tss_base + 0x2a);
575 new_segs[R_FS] = 0;
576 new_segs[R_GS] = 0;
577 new_trap = 0;
578 }
579#endif
580
581 /* now if an exception occurs, it will occur in the next task
582 context */
583
584 if (source == SWITCH_TSS_CALL) {
585 stw_kernel(tss_base, env->tr.selector);
586 new_eflags |= NT_MASK;
587 }
588
589 /* set busy bit */
590 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
591 target_ulong ptr;
592 uint32_t e2;
593 ptr = env->gdt.base + (tss_selector & ~7);
594 e2 = ldl_kernel(ptr + 4);
595 e2 |= DESC_TSS_BUSY_MASK;
596 stl_kernel(ptr + 4, e2);
597 }
598
599 /* set the new CPU state */
600 /* from this point, any exception which occurs can give problems */
601 env->cr[0] |= CR0_TS_MASK;
602 env->hflags |= HF_TS_MASK;
603 env->tr.selector = tss_selector;
604 env->tr.base = tss_base;
605 env->tr.limit = tss_limit;
606#ifndef VBOX
607 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
608#else
609 env->tr.flags = e2 & (DESC_RAW_FLAG_BITS & ~(DESC_TSS_BUSY_MASK)); /** @todo stop clearing the busy bit, VT-x and AMD-V seems to set it in the hidden bits. */
610 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
611 env->tr.newselector = 0;
612#endif
613
614 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
615 cpu_x86_update_cr3(env, new_cr3);
616 }
617
618 /* load all registers without an exception, then reload them with
619 possible exception */
620 env->eip = new_eip;
621 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
622 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
623 if (!(type & 8))
624 eflags_mask &= 0xffff;
625 load_eflags(new_eflags, eflags_mask);
626 /* XXX: what to do in 16 bit case ? */
627 EAX = new_regs[0];
628 ECX = new_regs[1];
629 EDX = new_regs[2];
630 EBX = new_regs[3];
631 ESP = new_regs[4];
632 EBP = new_regs[5];
633 ESI = new_regs[6];
634 EDI = new_regs[7];
635 if (new_eflags & VM_MASK) {
636 for(i = 0; i < 6; i++)
637 load_seg_vm(i, new_segs[i]);
638 /* in vm86, CPL is always 3 */
639 cpu_x86_set_cpl(env, 3);
640 } else {
641 /* CPL is set to the RPL of CS */
642 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
643 /* first just selectors as the rest may trigger exceptions */
644 for(i = 0; i < 6; i++)
645 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
646 }
647
648 env->ldt.selector = new_ldt & ~4;
649 env->ldt.base = 0;
650 env->ldt.limit = 0;
651 env->ldt.flags = 0;
652#ifdef VBOX
653 env->ldt.flags = DESC_INTEL_UNUSABLE;
654 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
655 env->ldt.newselector = 0;
656#endif
657
658 /* load the LDT */
659 if (new_ldt & 4)
660 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
661
662 if ((new_ldt & 0xfffc) != 0) {
663 dt = &env->gdt;
664 index = new_ldt & ~7;
665 if ((index + 7) > dt->limit)
666 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
667 ptr = dt->base + index;
668 e1 = ldl_kernel(ptr);
669 e2 = ldl_kernel(ptr + 4);
670 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
671 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
672 if (!(e2 & DESC_P_MASK))
673 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
674 load_seg_cache_raw_dt(&env->ldt, e1, e2);
675 }
676
677 /* load the segments */
678 if (!(new_eflags & VM_MASK)) {
679 tss_load_seg(R_CS, new_segs[R_CS]);
680 tss_load_seg(R_SS, new_segs[R_SS]);
681 tss_load_seg(R_ES, new_segs[R_ES]);
682 tss_load_seg(R_DS, new_segs[R_DS]);
683 tss_load_seg(R_FS, new_segs[R_FS]);
684 tss_load_seg(R_GS, new_segs[R_GS]);
685 }
686
687 /* check that EIP is in the CS segment limits */
688 if (new_eip > env->segs[R_CS].limit) {
689 /* XXX: different exception if CALL ? */
690 raise_exception_err(EXCP0D_GPF, 0);
691 }
692
693#ifndef CONFIG_USER_ONLY
694 /* reset local breakpoints */
695 if (env->dr[7] & 0x55) {
696 for (i = 0; i < 4; i++) {
697 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
698 hw_breakpoint_remove(env, i);
699 }
700 env->dr[7] &= ~0x55;
701 }
702#endif
703}
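/*
 * Flow summary (descriptive note mirroring the code above): a hardware task
 * switch 1) validates the new TSS descriptor and limit, 2) saves the
 * outgoing register state into the current TSS (clearing its busy bit for
 * JMP/IRET), 3) for CALL stores a back link and sets NT, 4) marks the new
 * TSS busy for JMP/CALL, 5) loads TR, CR3, EIP, EFLAGS, the general
 * registers and the LDT from the new TSS, and 6) reloads the segment
 * registers, any of which may still fault in the new task's context.
 */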
704
705/* check if Port I/O is allowed in TSS */
706static inline void check_io(int addr, int size)
707{
708#ifndef VBOX
709 int io_offset, val, mask;
710#else
711 int val, mask;
712 unsigned int io_offset;
713#endif /* VBOX */
714
715 /* TSS must be a valid 32 bit one */
716 if (!(env->tr.flags & DESC_P_MASK) ||
717 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
718 env->tr.limit < 103)
719 goto fail;
720 io_offset = lduw_kernel(env->tr.base + 0x66);
721 io_offset += (addr >> 3);
722 /* Note: the check needs two bytes */
723 if ((io_offset + 1) > env->tr.limit)
724 goto fail;
725 val = lduw_kernel(env->tr.base + io_offset);
726 val >>= (addr & 7);
727 mask = (1 << size) - 1;
728 /* all bits must be zero to allow the I/O */
729 if ((val & mask) != 0) {
730 fail:
731 raise_exception_err(EXCP0D_GPF, 0);
732 }
733}
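/*
 * Worked example (illustration, not from the original sources): with an I/O
 * map base of 0x68 in the TSS, check_io(0x3fa, 2) reads the 16-bit word at
 * env->tr.base + 0x68 + (0x3fa >> 3) = env->tr.base + 0xe7, shifts it right
 * by (0x3fa & 7) = 2 and requires the low two bits to be clear, i.e. both
 * ports 0x3fa and 0x3fb must be permitted by the bitmap.
 */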
734
735#ifdef VBOX
736
737/* Keep in sync with gen_check_external_event() */
738void helper_check_external_event()
739{
740 if ( (env->interrupt_request & ( CPU_INTERRUPT_EXTERNAL_FLUSH_TLB
741 | CPU_INTERRUPT_EXTERNAL_EXIT
742 | CPU_INTERRUPT_EXTERNAL_TIMER
743 | CPU_INTERRUPT_EXTERNAL_DMA))
744 || ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
745 && (env->eflags & IF_MASK)
746 && !(env->hflags & HF_INHIBIT_IRQ_MASK) ) )
747 {
748 helper_external_event();
749 }
750
751}
752
753void helper_sync_seg(uint32_t reg)
754{
755 if (env->segs[reg].newselector)
756 sync_seg(env, reg, env->segs[reg].newselector);
757}
758
759#endif /* VBOX */
760
761void helper_check_iob(uint32_t t0)
762{
763 check_io(t0, 1);
764}
765
766void helper_check_iow(uint32_t t0)
767{
768 check_io(t0, 2);
769}
770
771void helper_check_iol(uint32_t t0)
772{
773 check_io(t0, 4);
774}
775
776void helper_outb(uint32_t port, uint32_t data)
777{
778#ifndef VBOX
779 cpu_outb(port, data & 0xff);
780#else
781 cpu_outb(env, port, data & 0xff);
782#endif
783}
784
785target_ulong helper_inb(uint32_t port)
786{
787#ifndef VBOX
788 return cpu_inb(port);
789#else
790 return cpu_inb(env, port);
791#endif
792}
793
794void helper_outw(uint32_t port, uint32_t data)
795{
796#ifndef VBOX
797 cpu_outw(port, data & 0xffff);
798#else
799 cpu_outw(env, port, data & 0xffff);
800#endif
801}
802
803target_ulong helper_inw(uint32_t port)
804{
805#ifndef VBOX
806 return cpu_inw(port);
807#else
808 return cpu_inw(env, port);
809#endif
810}
811
812void helper_outl(uint32_t port, uint32_t data)
813{
814#ifndef VBOX
815 cpu_outl(port, data);
816#else
817 cpu_outl(env, port, data);
818#endif
819}
820
821target_ulong helper_inl(uint32_t port)
822{
823#ifndef VBOX
824 return cpu_inl(port);
825#else
826 return cpu_inl(env, port);
827#endif
828}
829
830static inline unsigned int get_sp_mask(unsigned int e2)
831{
832 if (e2 & DESC_B_MASK)
833 return 0xffffffff;
834 else
835 return 0xffff;
836}
837
838static int exeption_has_error_code(int intno)
839{
840 switch(intno) {
841 case 8:
842 case 10:
843 case 11:
844 case 12:
845 case 13:
846 case 14:
847 case 17:
848 return 1;
849 }
850 return 0;
851}
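/*
 * Note (illustration): the vectors above are exactly the exceptions that
 * push an error code: 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP),
 * 14 (#PF) and 17 (#AC).
 */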
852
853#ifdef TARGET_X86_64
854#define SET_ESP(val, sp_mask)\
855do {\
856 if ((sp_mask) == 0xffff)\
857 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
858 else if ((sp_mask) == 0xffffffffLL)\
859 ESP = (uint32_t)(val);\
860 else\
861 ESP = (val);\
862} while (0)
863#else
864#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
865#endif
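/*
 * Illustration (not from the original sources): with a 16-bit stack segment
 * sp_mask is 0xffff, so only SP is updated and the upper half of ESP is
 * preserved, e.g.
 *     ESP = 0x00123456;  SET_ESP(0xfffe, 0xffff);  =>  ESP == 0x0012fffe
 */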
866
867/* in 64-bit machines, this can overflow. So this segment addition macro
868 * can be used to trim the value to 32 bits whenever needed */
869#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
870
871/* XXX: add a is_user flag to have proper security support */
872#define PUSHW(ssp, sp, sp_mask, val)\
873{\
874 sp -= 2;\
875 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
876}
877
878#define PUSHL(ssp, sp, sp_mask, val)\
879{\
880 sp -= 4;\
881 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
882}
883
884#define POPW(ssp, sp, sp_mask, val)\
885{\
886 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
887 sp += 2;\
888}
889
890#define POPL(ssp, sp, sp_mask, val)\
891{\
892 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
893 sp += 4;\
894}
895
896/* protected mode interrupt */
897static void do_interrupt_protected(int intno, int is_int, int error_code,
898 unsigned int next_eip, int is_hw)
899{
900 SegmentCache *dt;
901 target_ulong ptr, ssp;
902 int type, dpl, selector, ss_dpl, cpl;
903 int has_error_code, new_stack, shift;
904 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
905 uint32_t old_eip, sp_mask;
906
907#ifdef VBOX
908 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
909 cpu_loop_exit();
910#endif
911
912 has_error_code = 0;
913 if (!is_int && !is_hw)
914 has_error_code = exeption_has_error_code(intno);
915 if (is_int)
916 old_eip = next_eip;
917 else
918 old_eip = env->eip;
919
920 dt = &env->idt;
921#ifndef VBOX
922 if (intno * 8 + 7 > dt->limit)
923#else
924 if ((unsigned)intno * 8 + 7 > dt->limit)
925#endif
926 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
927 ptr = dt->base + intno * 8;
928 e1 = ldl_kernel(ptr);
929 e2 = ldl_kernel(ptr + 4);
930 /* check gate type */
931 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
932 switch(type) {
933 case 5: /* task gate */
934#ifdef VBOX
935 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
936 cpl = env->hflags & HF_CPL_MASK;
937 /* check privilege if software int */
938 if (is_int && dpl < cpl)
939 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
940#endif
941 /* must do that check here to return the correct error code */
942 if (!(e2 & DESC_P_MASK))
943 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
944 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
945 if (has_error_code) {
946 int type;
947 uint32_t mask;
948 /* push the error code */
949 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
950 shift = type >> 3;
951 if (env->segs[R_SS].flags & DESC_B_MASK)
952 mask = 0xffffffff;
953 else
954 mask = 0xffff;
955 esp = (ESP - (2 << shift)) & mask;
956 ssp = env->segs[R_SS].base + esp;
957 if (shift)
958 stl_kernel(ssp, error_code);
959 else
960 stw_kernel(ssp, error_code);
961 SET_ESP(esp, mask);
962 }
963 return;
964 case 6: /* 286 interrupt gate */
965 case 7: /* 286 trap gate */
966 case 14: /* 386 interrupt gate */
967 case 15: /* 386 trap gate */
968 break;
969 default:
970 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
971 break;
972 }
973 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
974 cpl = env->hflags & HF_CPL_MASK;
975 /* check privilege if software int */
976 if (is_int && dpl < cpl)
977 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
978 /* check valid bit */
979 if (!(e2 & DESC_P_MASK))
980 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
981 selector = e1 >> 16;
982 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
983 if ((selector & 0xfffc) == 0)
984 raise_exception_err(EXCP0D_GPF, 0);
985
986 if (load_segment(&e1, &e2, selector) != 0)
987 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
988#ifdef VBOX /** @todo figure out when this is done one day... */
989 if (!(e2 & DESC_A_MASK))
990 e2 = set_segment_accessed(selector, e2);
991#endif
992 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
993 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
994 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
995 if (dpl > cpl)
996 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
997 if (!(e2 & DESC_P_MASK))
998 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
999 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1000 /* to inner privilege */
1001 get_ss_esp_from_tss(&ss, &esp, dpl);
1002 if ((ss & 0xfffc) == 0)
1003 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1004 if ((ss & 3) != dpl)
1005 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1006 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
1007 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1008#ifdef VBOX /** @todo figure out when this is done one day... */
1009 if (!(ss_e2 & DESC_A_MASK))
1010 ss_e2 = set_segment_accessed(ss, ss_e2);
1011#endif
1012 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1013 if (ss_dpl != dpl)
1014 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1015 if (!(ss_e2 & DESC_S_MASK) ||
1016 (ss_e2 & DESC_CS_MASK) ||
1017 !(ss_e2 & DESC_W_MASK))
1018 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1019 if (!(ss_e2 & DESC_P_MASK))
1020#ifdef VBOX /* See page 3-477 of 253666.pdf */
1021 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
1022#else
1023 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1024#endif
1025 new_stack = 1;
1026 sp_mask = get_sp_mask(ss_e2);
1027 ssp = get_seg_base(ss_e1, ss_e2);
1028#if defined(VBOX) && defined(DEBUG)
1029 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
1030#endif
1031 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1032 /* to same privilege */
1033 if (env->eflags & VM_MASK)
1034 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1035 new_stack = 0;
1036 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1037 ssp = env->segs[R_SS].base;
1038 esp = ESP;
1039 dpl = cpl;
1040 } else {
1041 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1042 new_stack = 0; /* avoid warning */
1043 sp_mask = 0; /* avoid warning */
1044 ssp = 0; /* avoid warning */
1045 esp = 0; /* avoid warning */
1046 }
1047
1048 shift = type >> 3;
1049
1050#if 0
1051 /* XXX: check that enough room is available */
1052 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
1053 if (env->eflags & VM_MASK)
1054 push_size += 8;
1055 push_size <<= shift;
1056#endif
1057 if (shift == 1) {
1058 if (new_stack) {
1059 if (env->eflags & VM_MASK) {
1060 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
1061 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
1062 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
1063 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
1064 }
1065 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
1066 PUSHL(ssp, esp, sp_mask, ESP);
1067 }
1068 PUSHL(ssp, esp, sp_mask, compute_eflags());
1069 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
1070 PUSHL(ssp, esp, sp_mask, old_eip);
1071 if (has_error_code) {
1072 PUSHL(ssp, esp, sp_mask, error_code);
1073 }
1074 } else {
1075 if (new_stack) {
1076 if (env->eflags & VM_MASK) {
1077 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
1078 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
1079 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
1080 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
1081 }
1082 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
1083 PUSHW(ssp, esp, sp_mask, ESP);
1084 }
1085 PUSHW(ssp, esp, sp_mask, compute_eflags());
1086 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
1087 PUSHW(ssp, esp, sp_mask, old_eip);
1088 if (has_error_code) {
1089 PUSHW(ssp, esp, sp_mask, error_code);
1090 }
1091 }
1092
1093 if (new_stack) {
1094 if (env->eflags & VM_MASK) {
1095 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
1096 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
1097 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
1098 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
1099 }
1100 ss = (ss & ~3) | dpl;
1101 cpu_x86_load_seg_cache(env, R_SS, ss,
1102 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
1103 }
1104 SET_ESP(esp, sp_mask);
1105
1106 selector = (selector & ~3) | dpl;
1107 cpu_x86_load_seg_cache(env, R_CS, selector,
1108 get_seg_base(e1, e2),
1109 get_seg_limit(e1, e2),
1110 e2);
1111 cpu_x86_set_cpl(env, dpl);
1112 env->eip = offset;
1113
1114 /* interrupt gates clear the IF mask */
1115 if ((type & 1) == 0) {
1116 env->eflags &= ~IF_MASK;
1117 }
1118#ifndef VBOX
1119 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1120#else
1121 /*
1122 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1123 * gets confused by seemingly changed EFLAGS. See #3491 and
1124 * public bug #2341.
1125 */
1126 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1127#endif
1128}
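/*
 * Stack frame sketch (descriptive note, not from the original sources): for
 * a 32-bit gate that switches to an inner privilege level, the code above
 * pushes, from higher to lower addresses on the new stack: [GS FS DS ES]
 * (vm86 entry only), SS, ESP, EFLAGS, CS, EIP and finally the error code,
 * each in a 32-bit slot; the 16-bit gate path pushes the same sequence in
 * 16-bit slots.
 */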
1129
1130#ifdef VBOX
1131
1132/* check if VME interrupt redirection is enabled in TSS */
1133DECLINLINE(bool) is_vme_irq_redirected(int intno)
1134{
1135 unsigned int io_offset, intredir_offset;
1136 unsigned char val, mask;
1137
1138 /* TSS must be a valid 32 bit one */
1139 if (!(env->tr.flags & DESC_P_MASK) ||
1140 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
1141 env->tr.limit < 103)
1142 goto fail;
1143 io_offset = lduw_kernel(env->tr.base + 0x66);
1144 /* Make sure the io bitmap offset is valid; anything less than sizeof(VBOXTSS) means there's none. */
1145 if (io_offset < 0x68 + 0x20)
1146 io_offset = 0x68 + 0x20;
1147 /* the virtual interrupt redirection bitmap is located below the io bitmap */
1148 intredir_offset = io_offset - 0x20;
1149
1150 intredir_offset += (intno >> 3);
1151 if ((intredir_offset) > env->tr.limit)
1152 goto fail;
1153
1154 val = ldub_kernel(env->tr.base + intredir_offset);
1155 mask = 1 << (unsigned char)(intno & 7);
1156
1157 /* bit set means no redirection. */
1158 if ((val & mask) != 0) {
1159 return false;
1160 }
1161 return true;
1162
1163fail:
1164 raise_exception_err(EXCP0D_GPF, 0);
1165 return true;
1166}
1167
1168/* V86 mode software interrupt with CR4.VME=1 */
1169static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
1170{
1171 target_ulong ptr, ssp;
1172 int selector;
1173 uint32_t offset, esp;
1174 uint32_t old_cs, old_eflags;
1175 uint32_t iopl;
1176
1177 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
1178
1179 if (!is_vme_irq_redirected(intno))
1180 {
1181 if (iopl == 3)
1182 {
1183 do_interrupt_protected(intno, 1, error_code, next_eip, 0);
1184 return;
1185 }
1186 else
1187 raise_exception_err(EXCP0D_GPF, 0);
1188 }
1189
1190 /* virtual mode idt is at linear address 0 */
1191 ptr = 0 + intno * 4;
1192 offset = lduw_kernel(ptr);
1193 selector = lduw_kernel(ptr + 2);
1194 esp = ESP;
1195 ssp = env->segs[R_SS].base;
1196 old_cs = env->segs[R_CS].selector;
1197
1198 old_eflags = compute_eflags();
1199 if (iopl < 3)
1200 {
1201 /* copy VIF into IF and set IOPL to 3 */
1202 if (env->eflags & VIF_MASK)
1203 old_eflags |= IF_MASK;
1204 else
1205 old_eflags &= ~IF_MASK;
1206
1207 old_eflags |= (3 << IOPL_SHIFT);
1208 }
1209
1210 /* XXX: use SS segment size ? */
1211 PUSHW(ssp, esp, 0xffff, old_eflags);
1212 PUSHW(ssp, esp, 0xffff, old_cs);
1213 PUSHW(ssp, esp, 0xffff, next_eip);
1214
1215 /* update processor state */
1216 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1217 env->eip = offset;
1218 env->segs[R_CS].selector = selector;
1219 env->segs[R_CS].base = (selector << 4);
1220 env->eflags &= ~(TF_MASK | RF_MASK);
1221
1222 if (iopl < 3)
1223 env->eflags &= ~VIF_MASK;
1224 else
1225 env->eflags &= ~IF_MASK;
1226}
1227
1228#endif /* VBOX */
1229
1230#ifdef TARGET_X86_64
1231
1232#define PUSHQ(sp, val)\
1233{\
1234 sp -= 8;\
1235 stq_kernel(sp, (val));\
1236}
1237
1238#define POPQ(sp, val)\
1239{\
1240 val = ldq_kernel(sp);\
1241 sp += 8;\
1242}
1243
1244static inline target_ulong get_rsp_from_tss(int level)
1245{
1246 int index;
1247
1248#if 0
1249 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
1250 env->tr.base, env->tr.limit);
1251#endif
1252
1253 if (!(env->tr.flags & DESC_P_MASK))
1254 cpu_abort(env, "invalid tss");
1255 index = 8 * level + 4;
1256 if ((index + 7) > env->tr.limit)
1257 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
1258 return ldq_kernel(env->tr.base + index);
1259}
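/*
 * Layout note (illustration, not from the original sources): in the 64-bit
 * TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and IST1..IST7 at offsets
 * 36..84, which is what "index = 8 * level + 4" yields; callers pass dpl,
 * or ist + 3 for an IST stack, as the level.
 */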
1260
1261/* 64 bit interrupt */
1262static void do_interrupt64(int intno, int is_int, int error_code,
1263 target_ulong next_eip, int is_hw)
1264{
1265 SegmentCache *dt;
1266 target_ulong ptr;
1267 int type, dpl, selector, cpl, ist;
1268 int has_error_code, new_stack;
1269 uint32_t e1, e2, e3, ss;
1270 target_ulong old_eip, esp, offset;
1271
1272#ifdef VBOX
1273 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
1274 cpu_loop_exit();
1275#endif
1276
1277 has_error_code = 0;
1278 if (!is_int && !is_hw)
1279 has_error_code = exeption_has_error_code(intno);
1280 if (is_int)
1281 old_eip = next_eip;
1282 else
1283 old_eip = env->eip;
1284
1285 dt = &env->idt;
1286 if (intno * 16 + 15 > dt->limit)
1287 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1288 ptr = dt->base + intno * 16;
1289 e1 = ldl_kernel(ptr);
1290 e2 = ldl_kernel(ptr + 4);
1291 e3 = ldl_kernel(ptr + 8);
1292 /* check gate type */
1293 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1294 switch(type) {
1295 case 14: /* 386 interrupt gate */
1296 case 15: /* 386 trap gate */
1297 break;
1298 default:
1299 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1300 break;
1301 }
1302 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1303 cpl = env->hflags & HF_CPL_MASK;
1304 /* check privilege if software int */
1305 if (is_int && dpl < cpl)
1306 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1307 /* check valid bit */
1308 if (!(e2 & DESC_P_MASK))
1309 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1310 selector = e1 >> 16;
1311 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1312 ist = e2 & 7;
1313 if ((selector & 0xfffc) == 0)
1314 raise_exception_err(EXCP0D_GPF, 0);
1315
1316 if (load_segment(&e1, &e2, selector) != 0)
1317 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1318 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1319 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1320 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1321 if (dpl > cpl)
1322 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1323 if (!(e2 & DESC_P_MASK))
1324 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1325 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1326 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1327 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1328 /* to inner privilege */
1329 if (ist != 0)
1330 esp = get_rsp_from_tss(ist + 3);
1331 else
1332 esp = get_rsp_from_tss(dpl);
1333 esp &= ~0xfLL; /* align stack */
1334 ss = 0;
1335 new_stack = 1;
1336 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1337 /* to same privilege */
1338 if (env->eflags & VM_MASK)
1339 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1340 new_stack = 0;
1341 if (ist != 0)
1342 esp = get_rsp_from_tss(ist + 3);
1343 else
1344 esp = ESP;
1345 esp &= ~0xfLL; /* align stack */
1346 dpl = cpl;
1347 } else {
1348 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1349 new_stack = 0; /* avoid warning */
1350 esp = 0; /* avoid warning */
1351 }
1352
1353 PUSHQ(esp, env->segs[R_SS].selector);
1354 PUSHQ(esp, ESP);
1355 PUSHQ(esp, compute_eflags());
1356 PUSHQ(esp, env->segs[R_CS].selector);
1357 PUSHQ(esp, old_eip);
1358 if (has_error_code) {
1359 PUSHQ(esp, error_code);
1360 }
1361
1362 if (new_stack) {
1363 ss = 0 | dpl;
1364#ifndef VBOX
1365 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1366#else
1367 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
1368#endif
1369 }
1370 ESP = esp;
1371
1372 selector = (selector & ~3) | dpl;
1373 cpu_x86_load_seg_cache(env, R_CS, selector,
1374 get_seg_base(e1, e2),
1375 get_seg_limit(e1, e2),
1376 e2);
1377 cpu_x86_set_cpl(env, dpl);
1378 env->eip = offset;
1379
1380 /* interrupt gates clear the IF mask */
1381 if ((type & 1) == 0) {
1382 env->eflags &= ~IF_MASK;
1383 }
1384#ifndef VBOX
1385 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1386#else /* VBOX */
1387 /*
1388 * We must clear VIP/VIF too on interrupt entry, as otherwise FreeBSD
1389 * gets confused by seemingly changed EFLAGS. See #3491 and
1390 * public bug #2341.
1391 */
1392 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK | VIF_MASK | VIP_MASK);
1393#endif /* VBOX */
1394}
1395#endif
1396
1397#ifdef TARGET_X86_64
1398#if defined(CONFIG_USER_ONLY)
1399void helper_syscall(int next_eip_addend)
1400{
1401 env->exception_index = EXCP_SYSCALL;
1402 env->exception_next_eip = env->eip + next_eip_addend;
1403 cpu_loop_exit();
1404}
1405#else
1406void helper_syscall(int next_eip_addend)
1407{
1408 int selector;
1409
1410 if (!(env->efer & MSR_EFER_SCE)) {
1411 raise_exception_err(EXCP06_ILLOP, 0);
1412 }
1413 selector = (env->star >> 32) & 0xffff;
1414 if (env->hflags & HF_LMA_MASK) {
1415 int code64;
1416
1417 ECX = env->eip + next_eip_addend;
1418 env->regs[11] = compute_eflags();
1419
1420 code64 = env->hflags & HF_CS64_MASK;
1421
1422 cpu_x86_set_cpl(env, 0);
1423 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1424 0, 0xffffffff,
1425 DESC_G_MASK | DESC_P_MASK |
1426 DESC_S_MASK |
1427 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1428 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1429 0, 0xffffffff,
1430 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1431 DESC_S_MASK |
1432 DESC_W_MASK | DESC_A_MASK);
1433 env->eflags &= ~env->fmask;
1434 load_eflags(env->eflags, 0);
1435 if (code64)
1436 env->eip = env->lstar;
1437 else
1438 env->eip = env->cstar;
1439 } else {
1440 ECX = (uint32_t)(env->eip + next_eip_addend);
1441
1442 cpu_x86_set_cpl(env, 0);
1443 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1444 0, 0xffffffff,
1445 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1446 DESC_S_MASK |
1447 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1448 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1449 0, 0xffffffff,
1450 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1451 DESC_S_MASK |
1452 DESC_W_MASK | DESC_A_MASK);
1453 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1454 env->eip = (uint32_t)env->star;
1455 }
1456}
1457#endif
1458#endif
1459
1460#ifdef TARGET_X86_64
1461void helper_sysret(int dflag)
1462{
1463 int cpl, selector;
1464
1465 if (!(env->efer & MSR_EFER_SCE)) {
1466 raise_exception_err(EXCP06_ILLOP, 0);
1467 }
1468 cpl = env->hflags & HF_CPL_MASK;
1469 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1470 raise_exception_err(EXCP0D_GPF, 0);
1471 }
1472 selector = (env->star >> 48) & 0xffff;
1473 if (env->hflags & HF_LMA_MASK) {
1474 if (dflag == 2) {
1475 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1476 0, 0xffffffff,
1477 DESC_G_MASK | DESC_P_MASK |
1478 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1479 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1480 DESC_L_MASK);
1481 env->eip = ECX;
1482 } else {
1483 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1484 0, 0xffffffff,
1485 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1486 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1487 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1488 env->eip = (uint32_t)ECX;
1489 }
1490 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1491 0, 0xffffffff,
1492 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1493 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1494 DESC_W_MASK | DESC_A_MASK);
1495 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1496 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1497 cpu_x86_set_cpl(env, 3);
1498 } else {
1499 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1500 0, 0xffffffff,
1501 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1502 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1503 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1504 env->eip = (uint32_t)ECX;
1505 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1506 0, 0xffffffff,
1507 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1508 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1509 DESC_W_MASK | DESC_A_MASK);
1510 env->eflags |= IF_MASK;
1511 cpu_x86_set_cpl(env, 3);
1512 }
1513}
1514#endif
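/*
 * MSR_STAR sketch (illustration of the two helpers above, not from the
 * original sources): bits 47:32 supply the SYSCALL CS selector (SS is that
 * value + 8) and bits 63:48 the SYSRET base selector.  E.g. with
 * STAR = 0x0023000800000000, SYSCALL loads CS=0x0008/SS=0x0010, 32-bit
 * SYSRET loads CS=0x0023|3 and SS=0x002b, and 64-bit SYSRET loads
 * CS=(0x0023+16)|3 = 0x0033.
 */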
1515
1516#ifdef VBOX
1517
1518/**
1519 * Checks and processes external VMM events.
1520 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1521 */
1522void helper_external_event(void)
1523{
1524# if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1525 uintptr_t uSP;
1526# ifdef RT_ARCH_AMD64
1527 __asm__ __volatile__("movq %%rsp, %0" : "=r" (uSP));
1528# else
1529 __asm__ __volatile__("movl %%esp, %0" : "=r" (uSP));
1530# endif
1531 AssertMsg(!(uSP & 15), ("xSP=%#p\n", uSP));
1532# endif
1533 /* Keep in sync with flags checked by gen_check_external_event() */
1534 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1535 {
1536 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1537 ~CPU_INTERRUPT_EXTERNAL_HARD);
1538 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1539 }
1540 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1541 {
1542 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1543 ~CPU_INTERRUPT_EXTERNAL_EXIT);
1544 cpu_exit(env);
1545 }
1546 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1547 {
1548 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1549 ~CPU_INTERRUPT_EXTERNAL_DMA);
1550 remR3DmaRun(env);
1551 }
1552 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1553 {
1554 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1555 ~CPU_INTERRUPT_EXTERNAL_TIMER);
1556 remR3TimersRun(env);
1557 }
1558 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)
1559 {
1560 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request,
1561 ~CPU_INTERRUPT_EXTERNAL_HARD);
1562 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1563 }
1564}
1565
1566/* helper for recording call instruction addresses for later scanning */
1567void helper_record_call()
1568{
1569 if ( !(env->state & CPU_RAW_RING0)
1570 && (env->cr[0] & CR0_PG_MASK)
1571 && !(env->eflags & X86_EFL_IF))
1572 remR3RecordCall(env);
1573}
1574
1575#endif /* VBOX */
1576
1577/* real mode interrupt */
1578static void do_interrupt_real(int intno, int is_int, int error_code,
1579 unsigned int next_eip)
1580{
1581 SegmentCache *dt;
1582 target_ulong ptr, ssp;
1583 int selector;
1584 uint32_t offset, esp;
1585 uint32_t old_cs, old_eip;
1586
1587 /* real mode (simpler !) */
1588 dt = &env->idt;
1589#ifndef VBOX
1590 if (intno * 4 + 3 > dt->limit)
1591#else
1592 if ((unsigned)intno * 4 + 3 > dt->limit)
1593#endif
1594 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1595 ptr = dt->base + intno * 4;
1596 offset = lduw_kernel(ptr);
1597 selector = lduw_kernel(ptr + 2);
1598 esp = ESP;
1599 ssp = env->segs[R_SS].base;
1600 if (is_int)
1601 old_eip = next_eip;
1602 else
1603 old_eip = env->eip;
1604 old_cs = env->segs[R_CS].selector;
1605 /* XXX: use SS segment size ? */
1606 PUSHW(ssp, esp, 0xffff, compute_eflags());
1607 PUSHW(ssp, esp, 0xffff, old_cs);
1608 PUSHW(ssp, esp, 0xffff, old_eip);
1609
1610 /* update processor state */
1611 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1612 env->eip = offset;
1613 env->segs[R_CS].selector = selector;
1614 env->segs[R_CS].base = (selector << 4);
1615 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1616}
1617
1618/* fake user mode interrupt */
1619void do_interrupt_user(int intno, int is_int, int error_code,
1620 target_ulong next_eip)
1621{
1622 SegmentCache *dt;
1623 target_ulong ptr;
1624 int dpl, cpl, shift;
1625 uint32_t e2;
1626
1627 dt = &env->idt;
1628 if (env->hflags & HF_LMA_MASK) {
1629 shift = 4;
1630 } else {
1631 shift = 3;
1632 }
1633 ptr = dt->base + (intno << shift);
1634 e2 = ldl_kernel(ptr + 4);
1635
1636 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1637 cpl = env->hflags & HF_CPL_MASK;
1638 /* check privilege if software int */
1639 if (is_int && dpl < cpl)
1640 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1641
1642 /* Since we emulate only user space, we cannot do more than
1643 exit the emulation with the suitable exception and error
1644 code */
1645 if (is_int)
1646 EIP = next_eip;
1647}
1648
1649#if !defined(CONFIG_USER_ONLY)
1650static void handle_even_inj(int intno, int is_int, int error_code,
1651 int is_hw, int rm)
1652{
1653 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1654 if (!(event_inj & SVM_EVTINJ_VALID)) {
1655 int type;
1656 if (is_int)
1657 type = SVM_EVTINJ_TYPE_SOFT;
1658 else
1659 type = SVM_EVTINJ_TYPE_EXEPT;
1660 event_inj = intno | type | SVM_EVTINJ_VALID;
1661 if (!rm && exeption_has_error_code(intno)) {
1662 event_inj |= SVM_EVTINJ_VALID_ERR;
1663 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1664 }
1665 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1666 }
1667}
1668#endif
1669
1670/*
1671 * Begin execution of an interruption. is_int is TRUE if coming from
1672 * the int instruction. next_eip is the EIP value AFTER the interrupt
1673 * instruction. It is only relevant if is_int is TRUE.
1674 */
1675void do_interrupt(int intno, int is_int, int error_code,
1676 target_ulong next_eip, int is_hw)
1677{
1678 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1679 if ((env->cr[0] & CR0_PE_MASK)) {
1680 static int count;
1681 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1682 count, intno, error_code, is_int,
1683 env->hflags & HF_CPL_MASK,
1684 env->segs[R_CS].selector, EIP,
1685 (int)env->segs[R_CS].base + EIP,
1686 env->segs[R_SS].selector, ESP);
1687 if (intno == 0x0e) {
1688 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1689 } else {
1690 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1691 }
1692 qemu_log("\n");
1693 log_cpu_state(env, X86_DUMP_CCOP);
1694#if 0
1695 {
1696 int i;
1697 uint8_t *ptr;
1698 qemu_log(" code=");
1699 ptr = env->segs[R_CS].base + env->eip;
1700 for(i = 0; i < 16; i++) {
1701 qemu_log(" %02x", ldub(ptr + i));
1702 }
1703 qemu_log("\n");
1704 }
1705#endif
1706 count++;
1707 }
1708 }
1709#ifdef VBOX
1710 if (RT_UNLIKELY(env->state & CPU_EMULATE_SINGLE_STEP)) {
1711 if (is_int) {
1712 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv%s\n",
1713 intno, error_code, (RTGCPTR)env->eip, is_hw ? " hw" : "");
1714 } else {
1715 RTLogPrintf("do_interrupt: %#04x err=%#x pc=%#RGv next=%#RGv%s\n",
1716 intno, error_code, (RTGCPTR)env->eip, (RTGCPTR)next_eip, is_hw ? " hw" : "");
1717 }
1718 }
1719#endif
1720 if (env->cr[0] & CR0_PE_MASK) {
1721#if !defined(CONFIG_USER_ONLY)
1722 if (env->hflags & HF_SVMI_MASK)
1723 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1724#endif
1725#ifdef TARGET_X86_64
1726 if (env->hflags & HF_LMA_MASK) {
1727 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1728 } else
1729#endif
1730 {
1731#ifdef VBOX
1732 /* int xx *, v86 code and VME enabled? */
1733 if ( (env->eflags & VM_MASK)
1734 && (env->cr[4] & CR4_VME_MASK)
1735 && is_int
1736 && !is_hw
1737 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1738 )
1739 do_soft_interrupt_vme(intno, error_code, next_eip);
1740 else
1741#endif /* VBOX */
1742 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1743 }
1744 } else {
1745#if !defined(CONFIG_USER_ONLY)
1746 if (env->hflags & HF_SVMI_MASK)
1747 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1748#endif
1749 do_interrupt_real(intno, is_int, error_code, next_eip);
1750 }
1751
1752#if !defined(CONFIG_USER_ONLY)
1753 if (env->hflags & HF_SVMI_MASK) {
1754 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1755 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1756 }
1757#endif
1758}
1759
1760/* This should come from sysemu.h - if we could include it here... */
1761void qemu_system_reset_request(void);
1762
1763/*
1764 * Check nested exceptions and change to double or triple fault if
1765 * needed. It should only be called, if this is not an interrupt.
1766 * Returns the new exception number.
1767 */
1768static int check_exception(int intno, int *error_code)
1769{
1770 int first_contributory = env->old_exception == 0 ||
1771 (env->old_exception >= 10 &&
1772 env->old_exception <= 13);
1773 int second_contributory = intno == 0 ||
1774 (intno >= 10 && intno <= 13);
1775
1776 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1777 env->old_exception, intno);
1778
1779#if !defined(CONFIG_USER_ONLY)
1780 if (env->old_exception == EXCP08_DBLE) {
1781 if (env->hflags & HF_SVMI_MASK)
1782 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1783
1784 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1785
1786# ifndef VBOX
1787 qemu_system_reset_request();
1788# else
1789 remR3RaiseRC(env->pVM, VINF_EM_RESET); /** @todo test + improve triple fault handling. */
1790# endif
1791 return EXCP_HLT;
1792 }
1793#endif
1794
1795 if ((first_contributory && second_contributory)
1796 || (env->old_exception == EXCP0E_PAGE &&
1797 (second_contributory || (intno == EXCP0E_PAGE)))) {
1798 intno = EXCP08_DBLE;
1799 *error_code = 0;
1800 }
1801
1802 if (second_contributory || (intno == EXCP0E_PAGE) ||
1803 (intno == EXCP08_DBLE))
1804 env->old_exception = intno;
1805
1806 return intno;
1807}
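/*
 * Example (illustration, not from the original sources): a #GP (13) raised
 * while a #NP (11) is being delivered makes both "contributory", so the
 * code above upgrades the new exception to #DF (8); a #PF during #PF
 * delivery is upgraded the same way, and a fault while delivering #DF takes
 * the triple-fault/reset path at the top of the function.
 */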
1808
1809/*
1810 * Signal an interruption. It is executed in the main CPU loop.
1811 * is_int is TRUE if coming from the int instruction. next_eip is the
1812 * EIP value AFTER the interrupt instruction. It is only relevant if
1813 * is_int is TRUE.
1814 */
1815static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1816 int next_eip_addend)
1817{
1818#if defined(VBOX) && defined(DEBUG)
1819 Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, (RTGCPTR)env->eip + next_eip_addend));
1820#endif
1821 if (!is_int) {
1822 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1823 intno = check_exception(intno, &error_code);
1824 } else {
1825 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1826 }
1827
1828 env->exception_index = intno;
1829 env->error_code = error_code;
1830 env->exception_is_int = is_int;
1831 env->exception_next_eip = env->eip + next_eip_addend;
1832 cpu_loop_exit();
1833}
1834
1835/* shortcuts to generate exceptions */
1836
1837void raise_exception_err(int exception_index, int error_code)
1838{
1839 raise_interrupt(exception_index, 0, error_code, 0);
1840}
1841
1842void raise_exception(int exception_index)
1843{
1844 raise_interrupt(exception_index, 0, 0, 0);
1845}
1846
1847void raise_exception_env(int exception_index, CPUState *nenv)
1848{
1849 env = nenv;
1850 raise_exception(exception_index);
1851}
1852/* SMM support */
1853
1854#if defined(CONFIG_USER_ONLY)
1855
1856void do_smm_enter(void)
1857{
1858}
1859
1860void helper_rsm(void)
1861{
1862}
1863
1864#else
1865
1866#ifdef TARGET_X86_64
1867#define SMM_REVISION_ID 0x00020064
1868#else
1869#define SMM_REVISION_ID 0x00020000
1870#endif
1871
1872void do_smm_enter(void)
1873{
1874 target_ulong sm_state;
1875 SegmentCache *dt;
1876 int i, offset;
1877
1878 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1879 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1880
1881 env->hflags |= HF_SMM_MASK;
1882 cpu_smm_update(env);
1883
1884 sm_state = env->smbase + 0x8000;
1885
1886#ifdef TARGET_X86_64
1887 for(i = 0; i < 6; i++) {
1888 dt = &env->segs[i];
1889 offset = 0x7e00 + i * 16;
1890 stw_phys(sm_state + offset, dt->selector);
1891 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1892 stl_phys(sm_state + offset + 4, dt->limit);
1893 stq_phys(sm_state + offset + 8, dt->base);
1894 }
1895
1896 stq_phys(sm_state + 0x7e68, env->gdt.base);
1897 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1898
1899 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1900 stq_phys(sm_state + 0x7e78, env->ldt.base);
1901 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1902 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1903
1904 stq_phys(sm_state + 0x7e88, env->idt.base);
1905 stl_phys(sm_state + 0x7e84, env->idt.limit);
1906
1907 stw_phys(sm_state + 0x7e90, env->tr.selector);
1908 stq_phys(sm_state + 0x7e98, env->tr.base);
1909 stl_phys(sm_state + 0x7e94, env->tr.limit);
1910 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1911
1912 stq_phys(sm_state + 0x7ed0, env->efer);
1913
1914 stq_phys(sm_state + 0x7ff8, EAX);
1915 stq_phys(sm_state + 0x7ff0, ECX);
1916 stq_phys(sm_state + 0x7fe8, EDX);
1917 stq_phys(sm_state + 0x7fe0, EBX);
1918 stq_phys(sm_state + 0x7fd8, ESP);
1919 stq_phys(sm_state + 0x7fd0, EBP);
1920 stq_phys(sm_state + 0x7fc8, ESI);
1921 stq_phys(sm_state + 0x7fc0, EDI);
1922 for(i = 8; i < 16; i++)
1923 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1924 stq_phys(sm_state + 0x7f78, env->eip);
1925 stl_phys(sm_state + 0x7f70, compute_eflags());
1926 stl_phys(sm_state + 0x7f68, env->dr[6]);
1927 stl_phys(sm_state + 0x7f60, env->dr[7]);
1928
1929 stl_phys(sm_state + 0x7f48, env->cr[4]);
1930 stl_phys(sm_state + 0x7f50, env->cr[3]);
1931 stl_phys(sm_state + 0x7f58, env->cr[0]);
1932
1933 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1934 stl_phys(sm_state + 0x7f00, env->smbase);
1935#else
1936 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1937 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1938 stl_phys(sm_state + 0x7ff4, compute_eflags());
1939 stl_phys(sm_state + 0x7ff0, env->eip);
1940 stl_phys(sm_state + 0x7fec, EDI);
1941 stl_phys(sm_state + 0x7fe8, ESI);
1942 stl_phys(sm_state + 0x7fe4, EBP);
1943 stl_phys(sm_state + 0x7fe0, ESP);
1944 stl_phys(sm_state + 0x7fdc, EBX);
1945 stl_phys(sm_state + 0x7fd8, EDX);
1946 stl_phys(sm_state + 0x7fd4, ECX);
1947 stl_phys(sm_state + 0x7fd0, EAX);
1948 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1949 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1950
1951 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1952 stl_phys(sm_state + 0x7f64, env->tr.base);
1953 stl_phys(sm_state + 0x7f60, env->tr.limit);
1954 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1955
1956 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1957 stl_phys(sm_state + 0x7f80, env->ldt.base);
1958 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1959 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1960
1961 stl_phys(sm_state + 0x7f74, env->gdt.base);
1962 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1963
1964 stl_phys(sm_state + 0x7f58, env->idt.base);
1965 stl_phys(sm_state + 0x7f54, env->idt.limit);
1966
1967 for(i = 0; i < 6; i++) {
1968 dt = &env->segs[i];
1969 if (i < 3)
1970 offset = 0x7f84 + i * 12;
1971 else
1972 offset = 0x7f2c + (i - 3) * 12;
1973 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1974 stl_phys(sm_state + offset + 8, dt->base);
1975 stl_phys(sm_state + offset + 4, dt->limit);
1976 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1977 }
1978 stl_phys(sm_state + 0x7f14, env->cr[4]);
1979
1980 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1981 stl_phys(sm_state + 0x7ef8, env->smbase);
1982#endif
1983 /* init SMM cpu state */
1984
1985#ifdef TARGET_X86_64
1986 cpu_load_efer(env, 0);
1987#endif
1988 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1989 env->eip = 0x00008000;
1990 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1991 0xffffffff, 0);
1992 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1993 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1994 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1995 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1996 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1997
1998 cpu_x86_update_cr0(env,
1999 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
2000 cpu_x86_update_cr4(env, 0);
2001 env->dr[7] = 0x00000400;
2002 CC_OP = CC_OP_EFLAGS;
2003}
2004
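/* RSM: leave System Management Mode by restoring the CPU state saved in the
   SMRAM state save area. Not expected to be reached in the VBox build, hence
   the abort below. */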
2005void helper_rsm(void)
2006{
2007#ifdef VBOX
2008 cpu_abort(env, "helper_rsm");
2009#else /* !VBOX */
2010 target_ulong sm_state;
2011 int i, offset;
2012 uint32_t val;
2013
2014 sm_state = env->smbase + 0x8000;
2015#ifdef TARGET_X86_64
2016 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
2017
2018 for(i = 0; i < 6; i++) {
2019 offset = 0x7e00 + i * 16;
2020 cpu_x86_load_seg_cache(env, i,
2021 lduw_phys(sm_state + offset),
2022 ldq_phys(sm_state + offset + 8),
2023 ldl_phys(sm_state + offset + 4),
2024 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
2025 }
2026
2027 env->gdt.base = ldq_phys(sm_state + 0x7e68);
2028 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
2029
2030 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
2031 env->ldt.base = ldq_phys(sm_state + 0x7e78);
2032 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
2033 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
2034#ifdef VBOX
2035 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2036 env->ldt.newselector = 0;
2037#endif
2038
2039 env->idt.base = ldq_phys(sm_state + 0x7e88);
2040 env->idt.limit = ldl_phys(sm_state + 0x7e84);
2041
2042 env->tr.selector = lduw_phys(sm_state + 0x7e90);
2043 env->tr.base = ldq_phys(sm_state + 0x7e98);
2044 env->tr.limit = ldl_phys(sm_state + 0x7e94);
2045 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
2046#ifdef VBOX
2047 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2048 env->tr.newselector = 0;
2049#endif
2050
2051 EAX = ldq_phys(sm_state + 0x7ff8);
2052 ECX = ldq_phys(sm_state + 0x7ff0);
2053 EDX = ldq_phys(sm_state + 0x7fe8);
2054 EBX = ldq_phys(sm_state + 0x7fe0);
2055 ESP = ldq_phys(sm_state + 0x7fd8);
2056 EBP = ldq_phys(sm_state + 0x7fd0);
2057 ESI = ldq_phys(sm_state + 0x7fc8);
2058 EDI = ldq_phys(sm_state + 0x7fc0);
2059 for(i = 8; i < 16; i++)
2060 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
2061 env->eip = ldq_phys(sm_state + 0x7f78);
2062 load_eflags(ldl_phys(sm_state + 0x7f70),
2063 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2064 env->dr[6] = ldl_phys(sm_state + 0x7f68);
2065 env->dr[7] = ldl_phys(sm_state + 0x7f60);
2066
2067 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
2068 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
2069 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
2070
2071 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
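    /* Bit 17 of the SMM revision ID advertises SMBASE relocation support;
       only then does RSM reload SMBASE from the state save area. */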
2072 if (val & 0x20000) {
2073 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
2074 }
2075#else
2076 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
2077 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
2078 load_eflags(ldl_phys(sm_state + 0x7ff4),
2079 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
2080 env->eip = ldl_phys(sm_state + 0x7ff0);
2081 EDI = ldl_phys(sm_state + 0x7fec);
2082 ESI = ldl_phys(sm_state + 0x7fe8);
2083 EBP = ldl_phys(sm_state + 0x7fe4);
2084 ESP = ldl_phys(sm_state + 0x7fe0);
2085 EBX = ldl_phys(sm_state + 0x7fdc);
2086 EDX = ldl_phys(sm_state + 0x7fd8);
2087 ECX = ldl_phys(sm_state + 0x7fd4);
2088 EAX = ldl_phys(sm_state + 0x7fd0);
2089 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
2090 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
2091
2092 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
2093 env->tr.base = ldl_phys(sm_state + 0x7f64);
2094 env->tr.limit = ldl_phys(sm_state + 0x7f60);
2095 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
2096#ifdef VBOX
2097 env->tr.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2098 env->tr.newselector = 0;
2099#endif
2100
2101 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
2102 env->ldt.base = ldl_phys(sm_state + 0x7f80);
2103 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
2104 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
2105#ifdef VBOX
2106 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2107 env->ldt.newselector = 0;
2108#endif
2109
2110 env->gdt.base = ldl_phys(sm_state + 0x7f74);
2111 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
2112
2113 env->idt.base = ldl_phys(sm_state + 0x7f58);
2114 env->idt.limit = ldl_phys(sm_state + 0x7f54);
2115
2116 for(i = 0; i < 6; i++) {
2117 if (i < 3)
2118 offset = 0x7f84 + i * 12;
2119 else
2120 offset = 0x7f2c + (i - 3) * 12;
2121 cpu_x86_load_seg_cache(env, i,
2122 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
2123 ldl_phys(sm_state + offset + 8),
2124 ldl_phys(sm_state + offset + 4),
2125 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
2126 }
2127 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
2128
2129 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
2130 if (val & 0x20000) {
2131 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
2132 }
2133#endif
2134 CC_OP = CC_OP_EFLAGS;
2135 env->hflags &= ~HF_SMM_MASK;
2136 cpu_smm_update(env);
2137
2138 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
2139 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
2140#endif /* !VBOX */
2141}
2142
2143#endif /* !CONFIG_USER_ONLY */
2144
2145
2146/* division, flags are undefined */
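/* Note: on x86 a zero divisor and a quotient that overflows the destination
   both raise the same #DE exception, hence EXCP00_DIVZ in both paths below. */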
2147
2148void helper_divb_AL(target_ulong t0)
2149{
2150 unsigned int num, den, q, r;
2151
2152 num = (EAX & 0xffff);
2153 den = (t0 & 0xff);
2154 if (den == 0) {
2155 raise_exception(EXCP00_DIVZ);
2156 }
2157 q = (num / den);
2158 if (q > 0xff)
2159 raise_exception(EXCP00_DIVZ);
2160 q &= 0xff;
2161 r = (num % den) & 0xff;
2162 EAX = (EAX & ~0xffff) | (r << 8) | q;
2163}
2164
2165void helper_idivb_AL(target_ulong t0)
2166{
2167 int num, den, q, r;
2168
2169 num = (int16_t)EAX;
2170 den = (int8_t)t0;
2171 if (den == 0) {
2172 raise_exception(EXCP00_DIVZ);
2173 }
2174 q = (num / den);
2175 if (q != (int8_t)q)
2176 raise_exception(EXCP00_DIVZ);
2177 q &= 0xff;
2178 r = (num % den) & 0xff;
2179 EAX = (EAX & ~0xffff) | (r << 8) | q;
2180}
2181
2182void helper_divw_AX(target_ulong t0)
2183{
2184 unsigned int num, den, q, r;
2185
2186 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2187 den = (t0 & 0xffff);
2188 if (den == 0) {
2189 raise_exception(EXCP00_DIVZ);
2190 }
2191 q = (num / den);
2192 if (q > 0xffff)
2193 raise_exception(EXCP00_DIVZ);
2194 q &= 0xffff;
2195 r = (num % den) & 0xffff;
2196 EAX = (EAX & ~0xffff) | q;
2197 EDX = (EDX & ~0xffff) | r;
2198}
2199
2200void helper_idivw_AX(target_ulong t0)
2201{
2202 int num, den, q, r;
2203
2204 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
2205 den = (int16_t)t0;
2206 if (den == 0) {
2207 raise_exception(EXCP00_DIVZ);
2208 }
2209 q = (num / den);
2210 if (q != (int16_t)q)
2211 raise_exception(EXCP00_DIVZ);
2212 q &= 0xffff;
2213 r = (num % den) & 0xffff;
2214 EAX = (EAX & ~0xffff) | q;
2215 EDX = (EDX & ~0xffff) | r;
2216}
2217
2218void helper_divl_EAX(target_ulong t0)
2219{
2220 unsigned int den, r;
2221 uint64_t num, q;
2222
2223 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2224 den = t0;
2225 if (den == 0) {
2226 raise_exception(EXCP00_DIVZ);
2227 }
2228 q = (num / den);
2229 r = (num % den);
2230 if (q > 0xffffffff)
2231 raise_exception(EXCP00_DIVZ);
2232 EAX = (uint32_t)q;
2233 EDX = (uint32_t)r;
2234}
2235
2236void helper_idivl_EAX(target_ulong t0)
2237{
2238 int den, r;
2239 int64_t num, q;
2240
2241 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2242 den = t0;
2243 if (den == 0) {
2244 raise_exception(EXCP00_DIVZ);
2245 }
2246 q = (num / den);
2247 r = (num % den);
2248 if (q != (int32_t)q)
2249 raise_exception(EXCP00_DIVZ);
2250 EAX = (uint32_t)q;
2251 EDX = (uint32_t)r;
2252}
2253
2254/* bcd */
2255
2256/* XXX: exception */
2257void helper_aam(int base)
2258{
2259 int al, ah;
2260 al = EAX & 0xff;
2261 ah = al / base;
2262 al = al % base;
2263 EAX = (EAX & ~0xffff) | al | (ah << 8);
2264 CC_DST = al;
2265}
2266
2267void helper_aad(int base)
2268{
2269 int al, ah;
2270 al = EAX & 0xff;
2271 ah = (EAX >> 8) & 0xff;
2272 al = ((ah * base) + al) & 0xff;
2273 EAX = (EAX & ~0xffff) | al;
2274 CC_DST = al;
2275}
2276
2277void helper_aaa(void)
2278{
2279 int icarry;
2280 int al, ah, af;
2281 int eflags;
2282
2283 eflags = helper_cc_compute_all(CC_OP);
2284 af = eflags & CC_A;
2285 al = EAX & 0xff;
2286 ah = (EAX >> 8) & 0xff;
2287
2288 icarry = (al > 0xf9);
2289 if (((al & 0x0f) > 9 ) || af) {
2290 al = (al + 6) & 0x0f;
2291 ah = (ah + 1 + icarry) & 0xff;
2292 eflags |= CC_C | CC_A;
2293 } else {
2294 eflags &= ~(CC_C | CC_A);
2295 al &= 0x0f;
2296 }
2297 EAX = (EAX & ~0xffff) | al | (ah << 8);
2298 CC_SRC = eflags;
2299}
2300
2301void helper_aas(void)
2302{
2303 int icarry;
2304 int al, ah, af;
2305 int eflags;
2306
2307 eflags = helper_cc_compute_all(CC_OP);
2308 af = eflags & CC_A;
2309 al = EAX & 0xff;
2310 ah = (EAX >> 8) & 0xff;
2311
2312 icarry = (al < 6);
2313 if (((al & 0x0f) > 9 ) || af) {
2314 al = (al - 6) & 0x0f;
2315 ah = (ah - 1 - icarry) & 0xff;
2316 eflags |= CC_C | CC_A;
2317 } else {
2318 eflags &= ~(CC_C | CC_A);
2319 al &= 0x0f;
2320 }
2321 EAX = (EAX & ~0xffff) | al | (ah << 8);
2322 CC_SRC = eflags;
2323}
2324
2325void helper_daa(void)
2326{
2327 int al, af, cf;
2328 int eflags;
2329
2330 eflags = helper_cc_compute_all(CC_OP);
2331 cf = eflags & CC_C;
2332 af = eflags & CC_A;
2333 al = EAX & 0xff;
2334
2335 eflags = 0;
2336 if (((al & 0x0f) > 9 ) || af) {
2337 al = (al + 6) & 0xff;
2338 eflags |= CC_A;
2339 }
2340 if ((al > 0x9f) || cf) {
2341 al = (al + 0x60) & 0xff;
2342 eflags |= CC_C;
2343 }
2344 EAX = (EAX & ~0xff) | al;
2345 /* well, speed is not an issue here, so we compute the flags by hand */
2346 eflags |= (al == 0) << 6; /* zf */
2347 eflags |= parity_table[al]; /* pf */
2348 eflags |= (al & 0x80); /* sf */
2349 CC_SRC = eflags;
2350}
2351
2352void helper_das(void)
2353{
2354 int al, al1, af, cf;
2355 int eflags;
2356
2357 eflags = helper_cc_compute_all(CC_OP);
2358 cf = eflags & CC_C;
2359 af = eflags & CC_A;
2360 al = EAX & 0xff;
2361
2362 eflags = 0;
2363 al1 = al;
2364 if (((al & 0x0f) > 9 ) || af) {
2365 eflags |= CC_A;
2366 if (al < 6 || cf)
2367 eflags |= CC_C;
2368 al = (al - 6) & 0xff;
2369 }
2370 if ((al1 > 0x99) || cf) {
2371 al = (al - 0x60) & 0xff;
2372 eflags |= CC_C;
2373 }
2374 EAX = (EAX & ~0xff) | al;
2375 /* well, speed is not an issue here, so we compute the flags by hand */
2376 eflags |= (al == 0) << 6; /* zf */
2377 eflags |= parity_table[al]; /* pf */
2378 eflags |= (al & 0x80); /* sf */
2379 CC_SRC = eflags;
2380}
2381
2382void helper_into(int next_eip_addend)
2383{
2384 int eflags;
2385 eflags = helper_cc_compute_all(CC_OP);
2386 if (eflags & CC_O) {
2387 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2388 }
2389}
2390
2391void helper_cmpxchg8b(target_ulong a0)
2392{
2393 uint64_t d;
2394 int eflags;
2395
2396 eflags = helper_cc_compute_all(CC_OP);
2397 d = ldq(a0);
2398 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2399 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2400 eflags |= CC_Z;
2401 } else {
2402 /* always do the store */
2403 stq(a0, d);
2404 EDX = (uint32_t)(d >> 32);
2405 EAX = (uint32_t)d;
2406 eflags &= ~CC_Z;
2407 }
2408 CC_SRC = eflags;
2409}
2410
2411#ifdef TARGET_X86_64
2412void helper_cmpxchg16b(target_ulong a0)
2413{
2414 uint64_t d0, d1;
2415 int eflags;
2416
2417 if ((a0 & 0xf) != 0)
2418 raise_exception(EXCP0D_GPF);
2419 eflags = helper_cc_compute_all(CC_OP);
2420 d0 = ldq(a0);
2421 d1 = ldq(a0 + 8);
2422 if (d0 == EAX && d1 == EDX) {
2423 stq(a0, EBX);
2424 stq(a0 + 8, ECX);
2425 eflags |= CC_Z;
2426 } else {
2427 /* always do the store */
2428 stq(a0, d0);
2429 stq(a0 + 8, d1);
2430 EDX = d1;
2431 EAX = d0;
2432 eflags &= ~CC_Z;
2433 }
2434 CC_SRC = eflags;
2435}
2436#endif
2437
2438void helper_single_step(void)
2439{
2440#ifndef CONFIG_USER_ONLY
2441 check_hw_breakpoints(env, 1);
2442 env->dr[6] |= DR6_BS;
2443#endif
2444 raise_exception(EXCP01_DB);
2445}
2446
2447void helper_cpuid(void)
2448{
2449 uint32_t eax, ebx, ecx, edx;
2450
2451 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2452
2453 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2454 EAX = eax;
2455 EBX = ebx;
2456 ECX = ecx;
2457 EDX = edx;
2458}
2459
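/* Helper for the nesting-level part of ENTER: copies 'level - 1' saved frame
   pointers from the old frame and then pushes t1, the new frame pointer value;
   the initial EBP push and the final EBP/ESP updates happen outside this helper. */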
2460void helper_enter_level(int level, int data32, target_ulong t1)
2461{
2462 target_ulong ssp;
2463 uint32_t esp_mask, esp, ebp;
2464
2465 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2466 ssp = env->segs[R_SS].base;
2467 ebp = EBP;
2468 esp = ESP;
2469 if (data32) {
2470 /* 32 bit */
2471 esp -= 4;
2472 while (--level) {
2473 esp -= 4;
2474 ebp -= 4;
2475 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2476 }
2477 esp -= 4;
2478 stl(ssp + (esp & esp_mask), t1);
2479 } else {
2480 /* 16 bit */
2481 esp -= 2;
2482 while (--level) {
2483 esp -= 2;
2484 ebp -= 2;
2485 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2486 }
2487 esp -= 2;
2488 stw(ssp + (esp & esp_mask), t1);
2489 }
2490}
2491
2492#ifdef TARGET_X86_64
2493void helper_enter64_level(int level, int data64, target_ulong t1)
2494{
2495 target_ulong esp, ebp;
2496 ebp = EBP;
2497 esp = ESP;
2498
2499 if (data64) {
2500 /* 64 bit */
2501 esp -= 8;
2502 while (--level) {
2503 esp -= 8;
2504 ebp -= 8;
2505 stq(esp, ldq(ebp));
2506 }
2507 esp -= 8;
2508 stq(esp, t1);
2509 } else {
2510 /* 16 bit */
2511 esp -= 2;
2512 while (--level) {
2513 esp -= 2;
2514 ebp -= 2;
2515 stw(esp, lduw(ebp));
2516 }
2517 esp -= 2;
2518 stw(esp, t1);
2519 }
2520}
2521#endif
2522
2523void helper_lldt(int selector)
2524{
2525 SegmentCache *dt;
2526 uint32_t e1, e2;
2527#ifndef VBOX
2528 int index, entry_limit;
2529#else
2530 unsigned int index, entry_limit;
2531#endif
2532 target_ulong ptr;
2533
2534#ifdef VBOX
2535 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
2536 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(selector & 0xffff)));
2537#endif
2538
2539 selector &= 0xffff;
2540 if ((selector & 0xfffc) == 0) {
2541 /* XXX: NULL selector case: invalid LDT */
2542 env->ldt.base = 0;
2543 env->ldt.limit = 0;
2544#ifdef VBOX
2545 env->ldt.flags = DESC_INTEL_UNUSABLE;
2546 env->ldt.fVBoxFlags = CPUMSELREG_FLAGS_VALID;
2547 env->ldt.newselector = 0;
2548#endif
2549 } else {
2550 if (selector & 0x4)
2551 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2552 dt = &env->gdt;
2553 index = selector & ~7;
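        /* System descriptors (LDT/TSS) are 16 bytes long in long mode, 8 bytes otherwise. */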
2554#ifdef TARGET_X86_64
2555 if (env->hflags & HF_LMA_MASK)
2556 entry_limit = 15;
2557 else
2558#endif
2559 entry_limit = 7;
2560 if ((index + entry_limit) > dt->limit)
2561 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2562 ptr = dt->base + index;
2563 e1 = ldl_kernel(ptr);
2564 e2 = ldl_kernel(ptr + 4);
2565 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2566 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2567 if (!(e2 & DESC_P_MASK))
2568 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2569#ifdef TARGET_X86_64
2570 if (env->hflags & HF_LMA_MASK) {
2571 uint32_t e3;
2572 e3 = ldl_kernel(ptr + 8);
2573 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2574 env->ldt.base |= (target_ulong)e3 << 32;
2575 } else
2576#endif
2577 {
2578 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2579 }
2580 }
2581 env->ldt.selector = selector;
2582#ifdef VBOX
2583 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2584 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2585#endif
2586}
2587
2588void helper_ltr(int selector)
2589{
2590 SegmentCache *dt;
2591 uint32_t e1, e2;
2592#ifndef VBOX
2593 int index, type, entry_limit;
2594#else
2595 unsigned int index;
2596 int type, entry_limit;
2597#endif
2598 target_ulong ptr;
2599
2600#ifdef VBOX
2601 Log(("helper_ltr: pc=%RGv old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2602 (RTGCPTR)env->eip, (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2603 env->tr.flags, (RTSEL)(selector & 0xffff)));
2604#endif
2605 selector &= 0xffff;
2606 if ((selector & 0xfffc) == 0) {
2607 /* NULL selector case: invalid TR */
2608#ifdef VBOX
2609 raise_exception_err(EXCP0A_TSS, 0);
2610#else
2611 env->tr.base = 0;
2612 env->tr.limit = 0;
2613 env->tr.flags = 0;
2614#endif
2615 } else {
2616 if (selector & 0x4)
2617 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2618 dt = &env->gdt;
2619 index = selector & ~7;
2620#ifdef TARGET_X86_64
2621 if (env->hflags & HF_LMA_MASK)
2622 entry_limit = 15;
2623 else
2624#endif
2625 entry_limit = 7;
2626 if ((index + entry_limit) > dt->limit)
2627 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2628 ptr = dt->base + index;
2629 e1 = ldl_kernel(ptr);
2630 e2 = ldl_kernel(ptr + 4);
2631 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2632 if ((e2 & DESC_S_MASK) ||
2633 (type != 1 && type != 9))
2634 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2635 if (!(e2 & DESC_P_MASK))
2636 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2637#ifdef TARGET_X86_64
2638 if (env->hflags & HF_LMA_MASK) {
2639 uint32_t e3, e4;
2640 e3 = ldl_kernel(ptr + 8);
2641 e4 = ldl_kernel(ptr + 12);
2642 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2643 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2644 load_seg_cache_raw_dt(&env->tr, e1, e2);
2645 env->tr.base |= (target_ulong)e3 << 32;
2646 } else
2647#endif
2648 {
2649 load_seg_cache_raw_dt(&env->tr, e1, e2);
2650 }
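        /* LTR marks the loaded TSS descriptor as busy in the GDT. */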
2651 e2 |= DESC_TSS_BUSY_MASK;
2652 stl_kernel(ptr + 4, e2);
2653 }
2654 env->tr.selector = selector;
2655#ifdef VBOX
2656 Log(("helper_ltr: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2657 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2658 env->tr.flags, (RTSEL)(selector & 0xffff)));
2659#endif
2660}
2661
2662/* Only works in protected mode, not VM86. seg_reg must be != R_CS. */
2663void helper_load_seg(int seg_reg, int selector)
2664{
2665 uint32_t e1, e2;
2666 int cpl, dpl, rpl;
2667 SegmentCache *dt;
2668#ifndef VBOX
2669 int index;
2670#else
2671 unsigned int index;
2672#endif
2673 target_ulong ptr;
2674
2675 selector &= 0xffff;
2676 cpl = env->hflags & HF_CPL_MASK;
2677#ifdef VBOX
2678
2679 /* Trying to load a selector with RPL=1? */
2680 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2681 {
2682 Log(("RPL 1 -> sel %04X -> %04X (helper_load_seg)\n", selector, selector & 0xfffc));
2683 selector = selector & 0xfffc;
2684 }
2685#endif /* VBOX */
2686 if ((selector & 0xfffc) == 0) {
2687 /* null selector case */
2688#ifndef VBOX
2689 if (seg_reg == R_SS
2690#ifdef TARGET_X86_64
2691 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2692#endif
2693 )
2694 raise_exception_err(EXCP0D_GPF, 0);
2695 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2696#else
2697 if (seg_reg == R_SS) {
2698 if (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2699 raise_exception_err(EXCP0D_GPF, 0);
2700 e2 = (cpl << DESC_DPL_SHIFT) | DESC_INTEL_UNUSABLE;
2701 } else {
2702 e2 = DESC_INTEL_UNUSABLE;
2703 }
2704 cpu_x86_load_seg_cache_with_clean_flags(env, seg_reg, selector, 0, 0, e2);
2705#endif
2706 } else {
2707
2708 if (selector & 0x4)
2709 dt = &env->ldt;
2710 else
2711 dt = &env->gdt;
2712 index = selector & ~7;
2713 if ((index + 7) > dt->limit)
2714 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2715 ptr = dt->base + index;
2716 e1 = ldl_kernel(ptr);
2717 e2 = ldl_kernel(ptr + 4);
2718
2719 if (!(e2 & DESC_S_MASK))
2720 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2721 rpl = selector & 3;
2722 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2723 if (seg_reg == R_SS) {
2724 /* must be writable segment */
2725 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2726 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2727 if (rpl != cpl || dpl != cpl)
2728 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2729 } else {
2730 /* must be readable segment */
2731 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2733
2734 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2735 /* if not conforming code, test rights */
2736 if (dpl < cpl || dpl < rpl)
2737 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2738 }
2739 }
2740
2741 if (!(e2 & DESC_P_MASK)) {
2742 if (seg_reg == R_SS)
2743 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2744 else
2745 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2746 }
2747
2748 /* set the access bit if not already set */
2749 if (!(e2 & DESC_A_MASK)) {
2750 e2 |= DESC_A_MASK;
2751 stl_kernel(ptr + 4, e2);
2752 }
2753
2754 cpu_x86_load_seg_cache(env, seg_reg, selector,
2755 get_seg_base(e1, e2),
2756 get_seg_limit(e1, e2),
2757 e2);
2758#if 0
2759 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2760 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2761#endif
2762 }
2763}
2764
2765/* protected mode jump */
2766void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2767 int next_eip_addend)
2768{
2769 int gate_cs, type;
2770 uint32_t e1, e2, cpl, dpl, rpl, limit;
2771 target_ulong next_eip;
2772
2773#ifdef VBOX /** @todo Why do we do this? */
2774 e1 = e2 = 0;
2775#endif
2776 if ((new_cs & 0xfffc) == 0)
2777 raise_exception_err(EXCP0D_GPF, 0);
2778 if (load_segment(&e1, &e2, new_cs) != 0)
2779 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2780 cpl = env->hflags & HF_CPL_MASK;
2781 if (e2 & DESC_S_MASK) {
2782 if (!(e2 & DESC_CS_MASK))
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2785 if (e2 & DESC_C_MASK) {
2786 /* conforming code segment */
2787 if (dpl > cpl)
2788 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2789 } else {
2790 /* non conforming code segment */
2791 rpl = new_cs & 3;
2792 if (rpl > cpl)
2793 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2794 if (dpl != cpl)
2795 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2796 }
2797 if (!(e2 & DESC_P_MASK))
2798 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2799 limit = get_seg_limit(e1, e2);
2800 if (new_eip > limit &&
2801 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2802 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2803#ifdef VBOX
2804 if (!(e2 & DESC_A_MASK))
2805 e2 = set_segment_accessed(new_cs, e2);
2806#endif
2807 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2808 get_seg_base(e1, e2), limit, e2);
2809 EIP = new_eip;
2810 } else {
2811 /* jump to call or task gate */
2812 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2813 rpl = new_cs & 3;
2814 cpl = env->hflags & HF_CPL_MASK;
2815 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2816 switch(type) {
2817 case 1: /* 286 TSS */
2818 case 9: /* 386 TSS */
2819 case 5: /* task gate */
2820 if (dpl < cpl || dpl < rpl)
2821 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2822 next_eip = env->eip + next_eip_addend;
2823 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2824 CC_OP = CC_OP_EFLAGS;
2825 break;
2826 case 4: /* 286 call gate */
2827 case 12: /* 386 call gate */
2828 if ((dpl < cpl) || (dpl < rpl))
2829 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2830 if (!(e2 & DESC_P_MASK))
2831 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2832 gate_cs = e1 >> 16;
2833 new_eip = (e1 & 0xffff);
2834 if (type == 12)
2835 new_eip |= (e2 & 0xffff0000);
2836 if (load_segment(&e1, &e2, gate_cs) != 0)
2837 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2838 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2839 /* must be code segment */
2840 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2841 (DESC_S_MASK | DESC_CS_MASK)))
2842 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2843 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2844 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2845 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2846 if (!(e2 & DESC_P_MASK))
2847#ifdef VBOX /* See page 3-514 of 253666.pdf */
2848 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2849#else
2850 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2851#endif
2852 limit = get_seg_limit(e1, e2);
2853 if (new_eip > limit)
2854 raise_exception_err(EXCP0D_GPF, 0);
2855 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2856 get_seg_base(e1, e2), limit, e2);
2857 EIP = new_eip;
2858 break;
2859 default:
2860 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2861 break;
2862 }
2863 }
2864}
2865
2866/* real mode call */
2867void helper_lcall_real(int new_cs, target_ulong new_eip1,
2868 int shift, int next_eip)
2869{
2870 int new_eip;
2871 uint32_t esp, esp_mask;
2872 target_ulong ssp;
2873
2874 new_eip = new_eip1;
2875 esp = ESP;
2876 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2877 ssp = env->segs[R_SS].base;
2878 if (shift) {
2879 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2880 PUSHL(ssp, esp, esp_mask, next_eip);
2881 } else {
2882 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2883 PUSHW(ssp, esp, esp_mask, next_eip);
2884 }
2885
2886 SET_ESP(esp, esp_mask);
2887 env->eip = new_eip;
2888 env->segs[R_CS].selector = new_cs;
2889 env->segs[R_CS].base = (new_cs << 4);
2890}
2891
2892/* protected mode call */
2893void helper_lcall_protected(int new_cs, target_ulong new_eip,
2894 int shift, int next_eip_addend)
2895{
2896 int new_stack, i;
2897 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2898 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2899 uint32_t val, limit, old_sp_mask;
2900 target_ulong ssp, old_ssp, next_eip;
2901
2902#ifdef VBOX /** @todo Why do we do this? */
2903 e1 = e2 = 0;
2904#endif
2905 next_eip = env->eip + next_eip_addend;
2906 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2907 LOG_PCALL_STATE(env);
2908 if ((new_cs & 0xfffc) == 0)
2909 raise_exception_err(EXCP0D_GPF, 0);
2910 if (load_segment(&e1, &e2, new_cs) != 0)
2911 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2912 cpl = env->hflags & HF_CPL_MASK;
2913 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2914 if (e2 & DESC_S_MASK) {
2915 if (!(e2 & DESC_CS_MASK))
2916 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2917 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2918 if (e2 & DESC_C_MASK) {
2919 /* conforming code segment */
2920 if (dpl > cpl)
2921 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2922 } else {
2923 /* non conforming code segment */
2924 rpl = new_cs & 3;
2925 if (rpl > cpl)
2926 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2927 if (dpl != cpl)
2928 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2929 }
2930 if (!(e2 & DESC_P_MASK))
2931 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2932#ifdef VBOX
2933 if (!(e2 & DESC_A_MASK))
2934 e2 = set_segment_accessed(new_cs, e2);
2935#endif
2936
2937#ifdef TARGET_X86_64
2938 /* XXX: check 16/32 bit cases in long mode */
2939 if (shift == 2) {
2940 target_ulong rsp;
2941 /* 64 bit case */
2942 rsp = ESP;
2943 PUSHQ(rsp, env->segs[R_CS].selector);
2944 PUSHQ(rsp, next_eip);
2945 /* from this point, not restartable */
2946 ESP = rsp;
2947 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2948 get_seg_base(e1, e2),
2949 get_seg_limit(e1, e2), e2);
2950 EIP = new_eip;
2951 } else
2952#endif
2953 {
2954 sp = ESP;
2955 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2956 ssp = env->segs[R_SS].base;
2957 if (shift) {
2958 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2959 PUSHL(ssp, sp, sp_mask, next_eip);
2960 } else {
2961 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2962 PUSHW(ssp, sp, sp_mask, next_eip);
2963 }
2964
2965 limit = get_seg_limit(e1, e2);
2966 if (new_eip > limit)
2967 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2968 /* from this point, not restartable */
2969 SET_ESP(sp, sp_mask);
2970 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2971 get_seg_base(e1, e2), limit, e2);
2972 EIP = new_eip;
2973 }
2974 } else {
2975 /* check gate type */
2976 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2977 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2978 rpl = new_cs & 3;
2979 switch(type) {
2980 case 1: /* available 286 TSS */
2981 case 9: /* available 386 TSS */
2982 case 5: /* task gate */
2983 if (dpl < cpl || dpl < rpl)
2984 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2985 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2986 CC_OP = CC_OP_EFLAGS;
2987 return;
2988 case 4: /* 286 call gate */
2989 case 12: /* 386 call gate */
2990 break;
2991 default:
2992 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2993 break;
2994 }
2995 shift = type >> 3;
2996
2997 if (dpl < cpl || dpl < rpl)
2998 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2999 /* check valid bit */
3000 if (!(e2 & DESC_P_MASK))
3001 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3002 selector = e1 >> 16;
3003 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
3004 param_count = e2 & 0x1f;
3005 if ((selector & 0xfffc) == 0)
3006 raise_exception_err(EXCP0D_GPF, 0);
3007
3008 if (load_segment(&e1, &e2, selector) != 0)
3009 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3010 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
3011 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3012 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3013 if (dpl > cpl)
3014 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3015 if (!(e2 & DESC_P_MASK))
3016 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
3017
3018 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
3019 /* to inner privilege */
3020 get_ss_esp_from_tss(&ss, &sp, dpl);
3021 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
3022 ss, sp, param_count, ESP);
3023 if ((ss & 0xfffc) == 0)
3024 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3025 if ((ss & 3) != dpl)
3026 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3027 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
3028 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3029 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3030 if (ss_dpl != dpl)
3031 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3032 if (!(ss_e2 & DESC_S_MASK) ||
3033 (ss_e2 & DESC_CS_MASK) ||
3034 !(ss_e2 & DESC_W_MASK))
3035 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3036 if (!(ss_e2 & DESC_P_MASK))
3037#ifdef VBOX /* See page 3-99 of 253666.pdf */
3038 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
3039#else
3040 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3041#endif
3042
3043 // push_size = ((param_count * 2) + 8) << shift;
3044
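            /* Copy 'param_count' call-gate parameters from the outer (caller's)
               stack to the new inner stack. */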
3045 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
3046 old_ssp = env->segs[R_SS].base;
3047
3048 sp_mask = get_sp_mask(ss_e2);
3049 ssp = get_seg_base(ss_e1, ss_e2);
3050 if (shift) {
3051 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
3052 PUSHL(ssp, sp, sp_mask, ESP);
3053 for(i = param_count - 1; i >= 0; i--) {
3054 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
3055 PUSHL(ssp, sp, sp_mask, val);
3056 }
3057 } else {
3058 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
3059 PUSHW(ssp, sp, sp_mask, ESP);
3060 for(i = param_count - 1; i >= 0; i--) {
3061 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
3062 PUSHW(ssp, sp, sp_mask, val);
3063 }
3064 }
3065 new_stack = 1;
3066 } else {
3067 /* to same privilege */
3068 sp = ESP;
3069 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3070 ssp = env->segs[R_SS].base;
3071 // push_size = (4 << shift);
3072 new_stack = 0;
3073 }
3074
3075 if (shift) {
3076 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
3077 PUSHL(ssp, sp, sp_mask, next_eip);
3078 } else {
3079 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
3080 PUSHW(ssp, sp, sp_mask, next_eip);
3081 }
3082
3083 /* from this point, not restartable */
3084
3085 if (new_stack) {
3086 ss = (ss & ~3) | dpl;
3087 cpu_x86_load_seg_cache(env, R_SS, ss,
3088 ssp,
3089 get_seg_limit(ss_e1, ss_e2),
3090 ss_e2);
3091 }
3092
3093 selector = (selector & ~3) | dpl;
3094 cpu_x86_load_seg_cache(env, R_CS, selector,
3095 get_seg_base(e1, e2),
3096 get_seg_limit(e1, e2),
3097 e2);
3098 cpu_x86_set_cpl(env, dpl);
3099 SET_ESP(sp, sp_mask);
3100 EIP = offset;
3101 }
3102}
3103
3104/* real and vm86 mode iret */
3105void helper_iret_real(int shift)
3106{
3107 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
3108 target_ulong ssp;
3109 int eflags_mask;
3110#ifdef VBOX
3111 bool fVME = false;
3112
3113 remR3TrapClear(env->pVM);
3114#endif /* VBOX */
3115
3116 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
3117 sp = ESP;
3118 ssp = env->segs[R_SS].base;
3119 if (shift == 1) {
3120 /* 32 bits */
3121 POPL(ssp, sp, sp_mask, new_eip);
3122 POPL(ssp, sp, sp_mask, new_cs);
3123 new_cs &= 0xffff;
3124 POPL(ssp, sp, sp_mask, new_eflags);
3125 } else {
3126 /* 16 bits */
3127 POPW(ssp, sp, sp_mask, new_eip);
3128 POPW(ssp, sp, sp_mask, new_cs);
3129 POPW(ssp, sp, sp_mask, new_eflags);
3130 }
3131#ifdef VBOX
3132 if ( (env->eflags & VM_MASK)
3133 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
3134 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
3135 {
3136 fVME = true;
3137 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
3138 /* if TF will be set -> #GP */
3139 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
3140 || (new_eflags & TF_MASK))
3141 raise_exception(EXCP0D_GPF);
3142 }
3143#endif /* VBOX */
3144 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
3145 env->segs[R_CS].selector = new_cs;
3146 env->segs[R_CS].base = (new_cs << 4);
3147 env->eip = new_eip;
3148#ifdef VBOX
3149 if (fVME)
3150 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3151 else
3152#endif
3153 if (env->eflags & VM_MASK)
3154 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
3155 else
3156 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
3157 if (shift == 0)
3158 eflags_mask &= 0xffff;
3159 load_eflags(new_eflags, eflags_mask);
3160 env->hflags2 &= ~HF2_NMI_MASK;
3161#ifdef VBOX
3162 if (fVME)
3163 {
3164 if (new_eflags & IF_MASK)
3165 env->eflags |= VIF_MASK;
3166 else
3167 env->eflags &= ~VIF_MASK;
3168 }
3169#endif /* VBOX */
3170}
3171
3172static inline void validate_seg(int seg_reg, int cpl)
3173{
3174 int dpl;
3175 uint32_t e2;
3176
3177 /* XXX: on x86_64, we do not want to nullify FS and GS because
3178 they may still contain a valid base. I would be interested to
3179 know how a real x86_64 CPU behaves */
3180 if ((seg_reg == R_FS || seg_reg == R_GS) &&
3181 (env->segs[seg_reg].selector & 0xfffc) == 0)
3182 return;
3183
3184 e2 = env->segs[seg_reg].flags;
3185 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3186 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
3187 /* data or non conforming code segment */
3188 if (dpl < cpl) {
3189 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
3190 }
3191 }
3192}
3193
3194/* protected mode iret */
3195static inline void helper_ret_protected(int shift, int is_iret, int addend)
3196{
3197 uint32_t new_cs, new_eflags, new_ss;
3198 uint32_t new_es, new_ds, new_fs, new_gs;
3199 uint32_t e1, e2, ss_e1, ss_e2;
3200 int cpl, dpl, rpl, eflags_mask, iopl;
3201 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3202
3203#ifdef VBOX /** @todo Why do we do this? */
3204 ss_e1 = ss_e2 = e1 = e2 = 0;
3205#endif
3206
3207#ifdef TARGET_X86_64
3208 if (shift == 2)
3209 sp_mask = -1;
3210 else
3211#endif
3212 sp_mask = get_sp_mask(env->segs[R_SS].flags);
3213 sp = ESP;
3214 ssp = env->segs[R_SS].base;
3215 new_eflags = 0; /* avoid warning */
3216#ifdef TARGET_X86_64
3217 if (shift == 2) {
3218 POPQ(sp, new_eip);
3219 POPQ(sp, new_cs);
3220 new_cs &= 0xffff;
3221 if (is_iret) {
3222 POPQ(sp, new_eflags);
3223 }
3224 } else
3225#endif
3226 if (shift == 1) {
3227 /* 32 bits */
3228 POPL(ssp, sp, sp_mask, new_eip);
3229 POPL(ssp, sp, sp_mask, new_cs);
3230 new_cs &= 0xffff;
3231 if (is_iret) {
3232 POPL(ssp, sp, sp_mask, new_eflags);
3233#define LOG_GROUP LOG_GROUP_REM
3234#if defined(VBOX) && defined(DEBUG)
3235 Log(("iret: new CS %04X (old=%x)\n", new_cs, env->segs[R_CS].selector));
3236 Log(("iret: new EIP %08X\n", (uint32_t)new_eip));
3237 Log(("iret: new EFLAGS %08X\n", new_eflags));
3238 Log(("iret: EAX=%08x\n", (uint32_t)EAX));
3239#endif
3240 if (new_eflags & VM_MASK)
3241 goto return_to_vm86;
3242 }
3243#ifdef VBOX
3244 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
3245 {
3246 if ( !EMIsRawRing1Enabled(env->pVM)
3247 || env->segs[R_CS].selector == (new_cs & 0xfffc))
3248 {
3249 Log(("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc));
3250 new_cs = new_cs & 0xfffc;
3251 }
3252 else
3253 {
3254 /* Ugly assumption: assume a genuine switch to ring-1. */
3255 Log(("Genuine switch to ring-1 (iret)\n"));
3256 }
3257 }
3258 else if ((new_cs & 0x3) == 2 && (env->state & CPU_RAW_RING0) && EMIsRawRing1Enabled(env->pVM))
3259 {
3260 Log(("RPL 2 -> new_cs %04X -> %04X\n", new_cs, (new_cs & 0xfffc) | 1));
3261 new_cs = (new_cs & 0xfffc) | 1;
3262 }
3263#endif
3264 } else {
3265 /* 16 bits */
3266 POPW(ssp, sp, sp_mask, new_eip);
3267 POPW(ssp, sp, sp_mask, new_cs);
3268 if (is_iret)
3269 POPW(ssp, sp, sp_mask, new_eflags);
3270 }
3271 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
3272 new_cs, new_eip, shift, addend);
3273 LOG_PCALL_STATE(env);
3274 if ((new_cs & 0xfffc) == 0)
3275 {
3276#if defined(VBOX) && defined(DEBUG)
3277 Log(("(new_cs & 0xfffc) == 0\n"));
3278#endif
3279 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3280 }
3281 if (load_segment(&e1, &e2, new_cs) != 0)
3282 {
3283#if defined(VBOX) && defined(DEBUG)
3284 Log(("load_segment failed\n"));
3285#endif
3286 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3287 }
3288 if (!(e2 & DESC_S_MASK) ||
3289 !(e2 & DESC_CS_MASK))
3290 {
3291#if defined(VBOX) && defined(DEBUG)
3292 Log(("e2 mask %08x\n", e2));
3293#endif
3294 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3295 }
3296 cpl = env->hflags & HF_CPL_MASK;
3297 rpl = new_cs & 3;
3298 if (rpl < cpl)
3299 {
3300#if defined(VBOX) && defined(DEBUG)
3301 Log(("rpl < cpl (%d vs %d)\n", rpl, cpl));
3302#endif
3303 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3304 }
3305 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3306
3307 if (e2 & DESC_C_MASK) {
3308 if (dpl > rpl)
3309 {
3310#if defined(VBOX) && defined(DEBUG)
3311 Log(("dpl > rpl (%d vs %d)\n", dpl, rpl));
3312#endif
3313 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3314 }
3315 } else {
3316 if (dpl != rpl)
3317 {
3318#if defined(VBOX) && defined(DEBUG)
3319 Log(("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2));
3320#endif
3321 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
3322 }
3323 }
3324 if (!(e2 & DESC_P_MASK))
3325 {
3326#if defined(VBOX) && defined(DEBUG)
3327 Log(("DESC_P_MASK e2=%08x\n", e2));
3328#endif
3329 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3330 }
3331
3332 sp += addend;
3333 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
3334 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
3335 /* return to same privilege level */
3336#ifdef VBOX
3337 if (!(e2 & DESC_A_MASK))
3338 e2 = set_segment_accessed(new_cs, e2);
3339#endif
3340 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3341 get_seg_base(e1, e2),
3342 get_seg_limit(e1, e2),
3343 e2);
3344 } else {
3345 /* return to different privilege level */
3346#ifdef TARGET_X86_64
3347 if (shift == 2) {
3348 POPQ(sp, new_esp);
3349 POPQ(sp, new_ss);
3350 new_ss &= 0xffff;
3351 } else
3352#endif
3353 if (shift == 1) {
3354 /* 32 bits */
3355 POPL(ssp, sp, sp_mask, new_esp);
3356 POPL(ssp, sp, sp_mask, new_ss);
3357 new_ss &= 0xffff;
3358 } else {
3359 /* 16 bits */
3360 POPW(ssp, sp, sp_mask, new_esp);
3361 POPW(ssp, sp, sp_mask, new_ss);
3362 }
3363 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
3364 new_ss, new_esp);
3365 if ((new_ss & 0xfffc) == 0) {
3366#ifdef TARGET_X86_64
3367 /* NULL SS is allowed in long mode if CPL != 3 */
3368# ifndef VBOX
3369 /* XXX: test CS64 ? */
3370 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
3371 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3372 0, 0xffffffff,
3373 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3374 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
3375 DESC_W_MASK | DESC_A_MASK);
3376 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
3377 } else
3378# else /* VBOX */
3379 if ((env->hflags & HF_LMA_MASK) && rpl != 3 && (e2 & DESC_L_MASK)) {
3380 if (!(e2 & DESC_A_MASK))
3381 e2 = set_segment_accessed(new_cs, e2);
3382 cpu_x86_load_seg_cache_with_clean_flags(env, R_SS, new_ss,
3383 0, 0xffffffff,
3384 DESC_INTEL_UNUSABLE | (rpl << DESC_DPL_SHIFT) );
3385 ss_e2 = DESC_B_MASK; /* not really used */
3386 } else
3387# endif
3388#endif
3389 {
3390#if defined(VBOX) && defined(DEBUG)
3391 Log(("NULL ss, rpl=%d\n", rpl));
3392#endif
3393 raise_exception_err(EXCP0D_GPF, 0);
3394 }
3395 } else {
3396 if ((new_ss & 3) != rpl)
3397 {
3398#if defined(VBOX) && defined(DEBUG)
3399 Log(("new_ss=%x != rpl=%d\n", new_ss, rpl));
3400#endif
3401 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3402 }
3403 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
3404 {
3405#if defined(VBOX) && defined(DEBUG)
3406 Log(("new_ss=%x load error\n", new_ss));
3407#endif
3408 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3409 }
3410 if (!(ss_e2 & DESC_S_MASK) ||
3411 (ss_e2 & DESC_CS_MASK) ||
3412 !(ss_e2 & DESC_W_MASK))
3413 {
3414#if defined(VBOX) && defined(DEBUG)
3415 Log(("new_ss=%x ss_e2=%#x bad type\n", new_ss, ss_e2));
3416#endif
3417 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3418 }
3419 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
3420 if (dpl != rpl)
3421 {
3422#if defined(VBOX) && defined(DEBUG)
3423 Log(("SS.dpl=%u != rpl=%u\n", dpl, rpl));
3424#endif
3425 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
3426 }
3427 if (!(ss_e2 & DESC_P_MASK))
3428 {
3429#if defined(VBOX) && defined(DEBUG)
3430 Log(("new_ss=%#x #NP\n", new_ss));
3431#endif
3432 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
3433 }
3434#ifdef VBOX
3435 if (!(e2 & DESC_A_MASK))
3436 e2 = set_segment_accessed(new_cs, e2);
3437 if (!(ss_e2 & DESC_A_MASK))
3438 ss_e2 = set_segment_accessed(new_ss, ss_e2);
3439#endif
3440 cpu_x86_load_seg_cache(env, R_SS, new_ss,
3441 get_seg_base(ss_e1, ss_e2),
3442 get_seg_limit(ss_e1, ss_e2),
3443 ss_e2);
3444 }
3445
3446 cpu_x86_load_seg_cache(env, R_CS, new_cs,
3447 get_seg_base(e1, e2),
3448 get_seg_limit(e1, e2),
3449 e2);
3450 cpu_x86_set_cpl(env, rpl);
3451 sp = new_esp;
3452#ifdef TARGET_X86_64
3453 if (env->hflags & HF_CS64_MASK)
3454 sp_mask = -1;
3455 else
3456#endif
3457 sp_mask = get_sp_mask(ss_e2);
3458
3459 /* validate data segments */
3460 validate_seg(R_ES, rpl);
3461 validate_seg(R_DS, rpl);
3462 validate_seg(R_FS, rpl);
3463 validate_seg(R_GS, rpl);
3464
3465 sp += addend;
3466 }
3467 SET_ESP(sp, sp_mask);
3468 env->eip = new_eip;
3469 if (is_iret) {
3470 /* NOTE: 'cpl' is the _old_ CPL */
3471 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
3472 if (cpl == 0)
3473#ifdef VBOX
3474 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
3475#else
3476 eflags_mask |= IOPL_MASK;
3477#endif
3478 iopl = (env->eflags >> IOPL_SHIFT) & 3;
3479 if (cpl <= iopl)
3480 eflags_mask |= IF_MASK;
3481 if (shift == 0)
3482 eflags_mask &= 0xffff;
3483 load_eflags(new_eflags, eflags_mask);
3484 }
3485 return;
3486
3487 return_to_vm86:
3488 POPL(ssp, sp, sp_mask, new_esp);
3489 POPL(ssp, sp, sp_mask, new_ss);
3490 POPL(ssp, sp, sp_mask, new_es);
3491 POPL(ssp, sp, sp_mask, new_ds);
3492 POPL(ssp, sp, sp_mask, new_fs);
3493 POPL(ssp, sp, sp_mask, new_gs);
3494
3495 /* modify processor state */
3496 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
3497 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
3498 load_seg_vm(R_CS, new_cs & 0xffff);
3499 cpu_x86_set_cpl(env, 3);
3500 load_seg_vm(R_SS, new_ss & 0xffff);
3501 load_seg_vm(R_ES, new_es & 0xffff);
3502 load_seg_vm(R_DS, new_ds & 0xffff);
3503 load_seg_vm(R_FS, new_fs & 0xffff);
3504 load_seg_vm(R_GS, new_gs & 0xffff);
3505
3506 env->eip = new_eip & 0xffff;
3507 ESP = new_esp;
3508}
3509
3510void helper_iret_protected(int shift, int next_eip)
3511{
3512 int tss_selector, type;
3513 uint32_t e1, e2;
3514
3515#ifdef VBOX
3516 Log(("iret (shift=%d new_eip=%#x)\n", shift, next_eip));
3517 e1 = e2 = 0; /** @todo Why do we do this? */
3518 remR3TrapClear(env->pVM);
3519#endif
3520
3521 /* NT set: IRET returns from a nested task via the back link stored at offset 0 of the current TSS */
3522 if (env->eflags & NT_MASK) {
3523#ifdef TARGET_X86_64
3524 if (env->hflags & HF_LMA_MASK)
3525 {
3526#if defined(VBOX) && defined(DEBUG)
3527 Log(("eflags.NT=1 on iret in long mode\n"));
3528#endif
3529 raise_exception_err(EXCP0D_GPF, 0);
3530 }
3531#endif
3532 tss_selector = lduw_kernel(env->tr.base + 0);
3533 if (tss_selector & 4)
3534 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3535 if (load_segment(&e1, &e2, tss_selector) != 0)
3536 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3537 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3538 /* NOTE: this checks both that it is a system segment and that it is a busy TSS; the 0x17 mask keeps the S bit and folds the 16/32-bit busy TSS types (3 and 11) to 3 */
3539 if (type != 3)
3540 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
3541 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3542 } else {
3543 helper_ret_protected(shift, 1, 0);
3544 }
3545 env->hflags2 &= ~HF2_NMI_MASK;
3546}
3547
3548void helper_lret_protected(int shift, int addend)
3549{
3550 helper_ret_protected(shift, 0, addend);
3551}
3552
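/* SYSENTER: fast call into ring 0. CS and SS are derived from IA32_SYSENTER_CS
   as flat segments, EIP/ESP come from the SYSENTER_EIP/ESP MSRs, and VM/IF/RF
   are cleared. */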
3553void helper_sysenter(void)
3554{
3555 if (env->sysenter_cs == 0) {
3556 raise_exception_err(EXCP0D_GPF, 0);
3557 }
3558 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3559 cpu_x86_set_cpl(env, 0);
3560
3561#ifdef TARGET_X86_64
3562 if (env->hflags & HF_LMA_MASK) {
3563 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3564 0, 0xffffffff,
3565 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3566 DESC_S_MASK |
3567 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3568 } else
3569#endif
3570 {
3571 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3572 0, 0xffffffff,
3573 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3574 DESC_S_MASK |
3575 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3576 }
3577 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3578 0, 0xffffffff,
3579 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3580 DESC_S_MASK |
3581 DESC_W_MASK | DESC_A_MASK);
3582 ESP = env->sysenter_esp;
3583 EIP = env->sysenter_eip;
3584}
3585
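/* SYSEXIT: fast return to ring 3. CS/SS are derived from IA32_SYSENTER_CS
   (+16/+24, or +32/+40 for a 64-bit return), EIP is loaded from EDX and ESP
   from ECX. */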
3586void helper_sysexit(int dflag)
3587{
3588 int cpl;
3589
3590 cpl = env->hflags & HF_CPL_MASK;
3591 if (env->sysenter_cs == 0 || cpl != 0) {
3592 raise_exception_err(EXCP0D_GPF, 0);
3593 }
3594 cpu_x86_set_cpl(env, 3);
3595#ifdef TARGET_X86_64
3596 if (dflag == 2) {
3597 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
3598 0, 0xffffffff,
3599 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3600 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3601 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3602 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3603 0, 0xffffffff,
3604 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3605 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3606 DESC_W_MASK | DESC_A_MASK);
3607 } else
3608#endif
3609 {
3610 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3611 0, 0xffffffff,
3612 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3613 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3614 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3615 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3616 0, 0xffffffff,
3617 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3618 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3619 DESC_W_MASK | DESC_A_MASK);
3620 }
3621 ESP = ECX;
3622 EIP = EDX;
3623}
3624
3625#if defined(CONFIG_USER_ONLY)
3626target_ulong helper_read_crN(int reg)
3627{
3628 return 0;
3629}
3630
3631void helper_write_crN(int reg, target_ulong t0)
3632{
3633}
3634
3635void helper_movl_drN_T0(int reg, target_ulong t0)
3636{
3637}
3638#else
3639target_ulong helper_read_crN(int reg)
3640{
3641 target_ulong val;
3642
3643 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3644 switch(reg) {
3645 default:
3646 val = env->cr[reg];
3647 break;
3648 case 8:
3649 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3650#ifndef VBOX
3651 val = cpu_get_apic_tpr(env->apic_state);
3652#else /* VBOX */
3653 val = cpu_get_apic_tpr(env);
3654#endif /* VBOX */
3655 } else {
3656 val = env->v_tpr;
3657 }
3658 break;
3659 }
3660 return val;
3661}
3662
3663void helper_write_crN(int reg, target_ulong t0)
3664{
3665 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3666 switch(reg) {
3667 case 0:
3668 cpu_x86_update_cr0(env, t0);
3669 break;
3670 case 3:
3671 cpu_x86_update_cr3(env, t0);
3672 break;
3673 case 4:
3674 cpu_x86_update_cr4(env, t0);
3675 break;
3676 case 8:
3677 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3678#ifndef VBOX
3679 cpu_set_apic_tpr(env->apic_state, t0);
3680#else /* VBOX */
3681 cpu_set_apic_tpr(env, t0);
3682#endif /* VBOX */
3683 }
3684 env->v_tpr = t0 & 0x0f;
3685 break;
3686 default:
3687 env->cr[reg] = t0;
3688 break;
3689 }
3690}
3691
3692void helper_movl_drN_T0(int reg, target_ulong t0)
3693{
3694 int i;
3695
3696 if (reg < 4) {
3697 hw_breakpoint_remove(env, reg);
3698 env->dr[reg] = t0;
3699 hw_breakpoint_insert(env, reg);
3700# ifndef VBOX
3701 } else if (reg == 7) {
3702# else
3703 } else if (reg == 7 || reg == 5) { /* (DR5 is an alias for DR7.) */
3704 if (t0 & X86_DR7_MBZ_MASK)
3705 raise_exception_err(EXCP0D_GPF, 0);
3706 t0 |= X86_DR7_RA1_MASK;
3707 t0 &= ~X86_DR7_RAZ_MASK;
3708# endif
3709 for (i = 0; i < 4; i++)
3710 hw_breakpoint_remove(env, i);
3711 env->dr[7] = t0;
3712 for (i = 0; i < 4; i++)
3713 hw_breakpoint_insert(env, i);
3714 } else {
3715# ifndef VBOX
3716 env->dr[reg] = t0;
3717# else
3718 if (t0 & X86_DR6_MBZ_MASK)
3719 raise_exception_err(EXCP0D_GPF, 0);
3720 t0 |= X86_DR6_RA1_MASK;
3721 t0 &= ~X86_DR6_RAZ_MASK;
3722 env->dr[6] = t0; /* (DR4 is an alias for DR6.) */
3723# endif
3724 }
3725}
3726#endif
3727
3728void helper_lmsw(target_ulong t0)
3729{
3730 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3731 if already set to one. */
3732 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3733 helper_write_crN(0, t0);
3734}
3735
3736void helper_clts(void)
3737{
3738 env->cr[0] &= ~CR0_TS_MASK;
3739 env->hflags &= ~HF_TS_MASK;
3740}
3741
3742void helper_invlpg(target_ulong addr)
3743{
3744 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3745 tlb_flush_page(env, addr);
3746}
3747
3748void helper_rdtsc(void)
3749{
3750 uint64_t val;
3751
3752 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3753 raise_exception(EXCP0D_GPF);
3754 }
3755 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3756
3757 val = cpu_get_tsc(env) + env->tsc_offset;
3758 EAX = (uint32_t)(val);
3759 EDX = (uint32_t)(val >> 32);
3760}
3761
3762void helper_rdtscp(void)
3763{
3764 helper_rdtsc();
3765#ifndef VBOX
3766 ECX = (uint32_t)(env->tsc_aux);
3767#else /* VBOX */
3768 uint64_t val;
3769 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0)
3770 ECX = (uint32_t)(val);
3771 else
3772 ECX = 0;
3773#endif /* VBOX */
3774}
3775
3776void helper_rdpmc(void)
3777{
3778#ifdef VBOX
3779 /* If X86_CR4_PCE is *not* set, then CPL must be zero. */
3780 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3781 raise_exception(EXCP0D_GPF);
3782 }
3783 /* Just return zero here; rather tricky to properly emulate this, especially as the specs are a mess. */
3784 EAX = 0;
3785 EDX = 0;
3786#else /* !VBOX */
3787 if (!(env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) { /* #GP unless CPL 0 or CR4.PCE is set */
3788 raise_exception(EXCP0D_GPF);
3789 }
3790 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3791
3792 /* currently unimplemented */
3793 raise_exception_err(EXCP06_ILLOP, 0);
3794#endif /* !VBOX */
3795}
3796
3797#if defined(CONFIG_USER_ONLY)
3798void helper_wrmsr(void)
3799{
3800}
3801
3802void helper_rdmsr(void)
3803{
3804}
3805#else
3806void helper_wrmsr(void)
3807{
3808 uint64_t val;
3809
3810 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3811
3812 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3813
3814 switch((uint32_t)ECX) {
3815 case MSR_IA32_SYSENTER_CS:
3816 env->sysenter_cs = val & 0xffff;
3817 break;
3818 case MSR_IA32_SYSENTER_ESP:
3819 env->sysenter_esp = val;
3820 break;
3821 case MSR_IA32_SYSENTER_EIP:
3822 env->sysenter_eip = val;
3823 break;
3824 case MSR_IA32_APICBASE:
3825# ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */
3826 cpu_set_apic_base(env->apic_state, val);
3827# endif
3828 break;
3829 case MSR_EFER:
3830 {
3831 uint64_t update_mask;
3832 update_mask = 0;
3833 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3834 update_mask |= MSR_EFER_SCE;
3835 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3836 update_mask |= MSR_EFER_LME;
3837 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3838 update_mask |= MSR_EFER_FFXSR;
3839 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3840 update_mask |= MSR_EFER_NXE;
3841 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3842 update_mask |= MSR_EFER_SVME;
3845 cpu_load_efer(env, (env->efer & ~update_mask) |
3846 (val & update_mask));
3847 }
3848 break;
3849 case MSR_STAR:
3850 env->star = val;
3851 break;
3852 case MSR_PAT:
3853 env->pat = val;
3854 break;
3855 case MSR_VM_HSAVE_PA:
3856 env->vm_hsave = val;
3857 break;
3858#ifdef TARGET_X86_64
3859 case MSR_LSTAR:
3860 env->lstar = val;
3861 break;
3862 case MSR_CSTAR:
3863 env->cstar = val;
3864 break;
3865 case MSR_FMASK:
3866 env->fmask = val;
3867 break;
3868 case MSR_FSBASE:
3869 env->segs[R_FS].base = val;
3870 break;
3871 case MSR_GSBASE:
3872 env->segs[R_GS].base = val;
3873 break;
3874 case MSR_KERNELGSBASE:
3875 env->kernelgsbase = val;
3876 break;
3877#endif
3878# ifndef VBOX
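    /* Variable-range MTRRs are base/mask MSR pairs at consecutive numbers,
       hence the division by two when indexing mtrr_var[]. */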
3879 case MSR_MTRRphysBase(0):
3880 case MSR_MTRRphysBase(1):
3881 case MSR_MTRRphysBase(2):
3882 case MSR_MTRRphysBase(3):
3883 case MSR_MTRRphysBase(4):
3884 case MSR_MTRRphysBase(5):
3885 case MSR_MTRRphysBase(6):
3886 case MSR_MTRRphysBase(7):
3887 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3888 break;
3889 case MSR_MTRRphysMask(0):
3890 case MSR_MTRRphysMask(1):
3891 case MSR_MTRRphysMask(2):
3892 case MSR_MTRRphysMask(3):
3893 case MSR_MTRRphysMask(4):
3894 case MSR_MTRRphysMask(5):
3895 case MSR_MTRRphysMask(6):
3896 case MSR_MTRRphysMask(7):
3897 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3898 break;
3899 case MSR_MTRRfix64K_00000:
3900 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3901 break;
3902 case MSR_MTRRfix16K_80000:
3903 case MSR_MTRRfix16K_A0000:
3904 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3905 break;
3906 case MSR_MTRRfix4K_C0000:
3907 case MSR_MTRRfix4K_C8000:
3908 case MSR_MTRRfix4K_D0000:
3909 case MSR_MTRRfix4K_D8000:
3910 case MSR_MTRRfix4K_E0000:
3911 case MSR_MTRRfix4K_E8000:
3912 case MSR_MTRRfix4K_F0000:
3913 case MSR_MTRRfix4K_F8000:
3914 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3915 break;
3916 case MSR_MTRRdefType:
3917 env->mtrr_deftype = val;
3918 break;
3919 case MSR_MCG_STATUS:
3920 env->mcg_status = val;
3921 break;
3922 case MSR_MCG_CTL:
3923 if ((env->mcg_cap & MCG_CTL_P)
3924 && (val == 0 || val == ~(uint64_t)0))
3925 env->mcg_ctl = val;
3926 break;
3927 case MSR_TSC_AUX:
3928 env->tsc_aux = val;
3929 break;
3930# endif /* !VBOX */
3931 default:
3932# ifndef VBOX
3933 if ((uint32_t)ECX >= MSR_MC0_CTL
3934 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3935 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3936 if ((offset & 0x3) != 0
3937 || (val == 0 || val == ~(uint64_t)0))
3938 env->mce_banks[offset] = val;
3939 break;
3940 }
3941 /* XXX: exception ? */
3942# endif
3943 break;
3944 }
3945
3946# ifdef VBOX
3947 /* call CPUM. */
3948 if (cpu_wrmsr(env, (uint32_t)ECX, val) != 0)
3949 {
3950 /** @todo be a brave man and raise a \#GP(0) here as we should... */
3951 }
3952# endif
3953}
3954
3955void helper_rdmsr(void)
3956{
3957 uint64_t val;
3958
3959 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3960
3961 switch((uint32_t)ECX) {
3962 case MSR_IA32_SYSENTER_CS:
3963 val = env->sysenter_cs;
3964 break;
3965 case MSR_IA32_SYSENTER_ESP:
3966 val = env->sysenter_esp;
3967 break;
3968 case MSR_IA32_SYSENTER_EIP:
3969 val = env->sysenter_eip;
3970 break;
3971 case MSR_IA32_APICBASE:
3972#ifndef VBOX
3973 val = cpu_get_apic_base(env->apic_state);
3974#else /* VBOX */
3975 val = cpu_get_apic_base(env);
3976#endif /* VBOX */
3977 break;
3978 case MSR_EFER:
3979 val = env->efer;
3980 break;
3981 case MSR_STAR:
3982 val = env->star;
3983 break;
3984 case MSR_PAT:
3985 val = env->pat;
3986 break;
3987 case MSR_VM_HSAVE_PA:
3988 val = env->vm_hsave;
3989 break;
3990# ifndef VBOX /* forward to CPUMQueryGuestMsr. */
3991 case MSR_IA32_PERF_STATUS:
3992 /* tsc_increment_by_tick */
3993 val = 1000ULL;
3994 /* CPU multiplier */
3995 val |= (((uint64_t)4ULL) << 40);
3996 break;
3997# endif /* !VBOX */
3998#ifdef TARGET_X86_64
3999 case MSR_LSTAR:
4000 val = env->lstar;
4001 break;
4002 case MSR_CSTAR:
4003 val = env->cstar;
4004 break;
4005 case MSR_FMASK:
4006 val = env->fmask;
4007 break;
4008 case MSR_FSBASE:
4009 val = env->segs[R_FS].base;
4010 break;
4011 case MSR_GSBASE:
4012 val = env->segs[R_GS].base;
4013 break;
4014 case MSR_KERNELGSBASE:
4015 val = env->kernelgsbase;
4016 break;
4017# ifndef VBOX
4018 case MSR_TSC_AUX:
4019 val = env->tsc_aux;
4020 break;
4021# endif /*!VBOX*/
4022#endif
4023# ifndef VBOX
4024 case MSR_MTRRphysBase(0):
4025 case MSR_MTRRphysBase(1):
4026 case MSR_MTRRphysBase(2):
4027 case MSR_MTRRphysBase(3):
4028 case MSR_MTRRphysBase(4):
4029 case MSR_MTRRphysBase(5):
4030 case MSR_MTRRphysBase(6):
4031 case MSR_MTRRphysBase(7):
4032 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
4033 break;
4034 case MSR_MTRRphysMask(0):
4035 case MSR_MTRRphysMask(1):
4036 case MSR_MTRRphysMask(2):
4037 case MSR_MTRRphysMask(3):
4038 case MSR_MTRRphysMask(4):
4039 case MSR_MTRRphysMask(5):
4040 case MSR_MTRRphysMask(6):
4041 case MSR_MTRRphysMask(7):
4042 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
4043 break;
4044 case MSR_MTRRfix64K_00000:
4045 val = env->mtrr_fixed[0];
4046 break;
4047 case MSR_MTRRfix16K_80000:
4048 case MSR_MTRRfix16K_A0000:
4049 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
4050 break;
4051 case MSR_MTRRfix4K_C0000:
4052 case MSR_MTRRfix4K_C8000:
4053 case MSR_MTRRfix4K_D0000:
4054 case MSR_MTRRfix4K_D8000:
4055 case MSR_MTRRfix4K_E0000:
4056 case MSR_MTRRfix4K_E8000:
4057 case MSR_MTRRfix4K_F0000:
4058 case MSR_MTRRfix4K_F8000:
4059 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
4060 break;
4061 case MSR_MTRRdefType:
4062 val = env->mtrr_deftype;
4063 break;
4064 case MSR_MTRRcap:
4065 if (env->cpuid_features & CPUID_MTRR)
4066 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
4067 else
4068 /* XXX: exception ? */
4069 val = 0;
4070 break;
4071 case MSR_MCG_CAP:
4072 val = env->mcg_cap;
4073 break;
4074 case MSR_MCG_CTL:
4075 if (env->mcg_cap & MCG_CTL_P)
4076 val = env->mcg_ctl;
4077 else
4078 val = 0;
4079 break;
4080 case MSR_MCG_STATUS:
4081 val = env->mcg_status;
4082 break;
4083# endif /* !VBOX */
4084 default:
4085# ifndef VBOX
4086 if ((uint32_t)ECX >= MSR_MC0_CTL
4087 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
4088 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
4089 val = env->mce_banks[offset];
4090 break;
4091 }
4092 /* XXX: exception ? */
4093 val = 0;
4094# else /* VBOX */
4095 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4096 {
4097 /** @todo be a brave man and raise a \#GP(0) here as we should... */
4098 val = 0;
4099 }
4100# endif /* VBOX */
4101 break;
4102 }
4103 EAX = (uint32_t)(val);
4104 EDX = (uint32_t)(val >> 32);
4105
4106# ifdef VBOX_STRICT
4107 if ((uint32_t)ECX != MSR_IA32_TSC) {
4108 if (cpu_rdmsr(env, (uint32_t)ECX, &val) != 0)
4109 val = 0;
4110 AssertMsg(val == RT_MAKE_U64(EAX, EDX), ("idMsr=%#x val=%#llx eax:edx=%#llx\n", (uint32_t)ECX, val, RT_MAKE_U64(EAX, EDX)));
4111 }
4112# endif
4113}
4114#endif
4115
4116target_ulong helper_lsl(target_ulong selector1)
4117{
4118 unsigned int limit;
4119 uint32_t e1, e2, eflags, selector;
4120 int rpl, dpl, cpl, type;
4121
4122 selector = selector1 & 0xffff;
4123 eflags = helper_cc_compute_all(CC_OP);
4124 if ((selector & 0xfffc) == 0)
4125 goto fail;
4126 if (load_segment(&e1, &e2, selector) != 0)
4127 goto fail;
4128 rpl = selector & 3;
4129 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4130 cpl = env->hflags & HF_CPL_MASK;
4131 if (e2 & DESC_S_MASK) {
4132 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4133 /* conforming */
4134 } else {
4135 if (dpl < cpl || dpl < rpl)
4136 goto fail;
4137 }
4138 } else {
4139 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
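        /* LSL reports a limit only for segment-like system descriptors:
           available/busy 16- and 32-bit TSS and LDT (types 1, 2, 3, 9, 11). */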
4140 switch(type) {
4141 case 1:
4142 case 2:
4143 case 3:
4144 case 9:
4145 case 11:
4146 break;
4147 default:
4148 goto fail;
4149 }
4150 if (dpl < cpl || dpl < rpl) {
4151 fail:
4152 CC_SRC = eflags & ~CC_Z;
4153 return 0;
4154 }
4155 }
4156 limit = get_seg_limit(e1, e2);
4157 CC_SRC = eflags | CC_Z;
4158 return limit;
4159}
4160
4161target_ulong helper_lar(target_ulong selector1)
4162{
4163 uint32_t e1, e2, eflags, selector;
4164 int rpl, dpl, cpl, type;
4165
4166 selector = selector1 & 0xffff;
4167 eflags = helper_cc_compute_all(CC_OP);
4168 if ((selector & 0xfffc) == 0)
4169 goto fail;
4170 if (load_segment(&e1, &e2, selector) != 0)
4171 goto fail;
4172 rpl = selector & 3;
4173 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4174 cpl = env->hflags & HF_CPL_MASK;
4175 if (e2 & DESC_S_MASK) {
4176 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
4177 /* conforming */
4178 } else {
4179 if (dpl < cpl || dpl < rpl)
4180 goto fail;
4181 }
4182 } else {
4183 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4184 switch(type) {
4185 case 1:
4186 case 2:
4187 case 3:
4188 case 4:
4189 case 5:
4190 case 9:
4191 case 11:
4192 case 12:
4193 break;
4194 default:
4195 goto fail;
4196 }
4197 if (dpl < cpl || dpl < rpl) {
4198 fail:
4199 CC_SRC = eflags & ~CC_Z;
4200 return 0;
4201 }
4202 }
4203 CC_SRC = eflags | CC_Z;
4204#ifdef VBOX /* AMD says 0x00ffff00, while intel says 0x00fxff00. Bochs and IEM does like AMD says (x=f). */
4205 return e2 & 0x00ffff00;
4206#else
4207 return e2 & 0x00f0ff00;
4208#endif
4209}
4210
4211void helper_verr(target_ulong selector1)
4212{
4213 uint32_t e1, e2, eflags, selector;
4214 int rpl, dpl, cpl;
4215
4216 selector = selector1 & 0xffff;
4217 eflags = helper_cc_compute_all(CC_OP);
4218 if ((selector & 0xfffc) == 0)
4219 goto fail;
4220 if (load_segment(&e1, &e2, selector) != 0)
4221 goto fail;
4222 if (!(e2 & DESC_S_MASK))
4223 goto fail;
4224 rpl = selector & 3;
4225 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4226 cpl = env->hflags & HF_CPL_MASK;
4227 if (e2 & DESC_CS_MASK) {
4228 if (!(e2 & DESC_R_MASK))
4229 goto fail;
4230 if (!(e2 & DESC_C_MASK)) {
4231 if (dpl < cpl || dpl < rpl)
4232 goto fail;
4233 }
4234 } else {
4235 if (dpl < cpl || dpl < rpl) {
4236 fail:
4237 CC_SRC = eflags & ~CC_Z;
4238 return;
4239 }
4240 }
4241 CC_SRC = eflags | CC_Z;
4242}
4243
4244void helper_verw(target_ulong selector1)
4245{
4246 uint32_t e1, e2, eflags, selector;
4247 int rpl, dpl, cpl;
4248
4249 selector = selector1 & 0xffff;
4250 eflags = helper_cc_compute_all(CC_OP);
4251 if ((selector & 0xfffc) == 0)
4252 goto fail;
4253 if (load_segment(&e1, &e2, selector) != 0)
4254 goto fail;
4255 if (!(e2 & DESC_S_MASK))
4256 goto fail;
4257 rpl = selector & 3;
4258 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
4259 cpl = env->hflags & HF_CPL_MASK;
4260 if (e2 & DESC_CS_MASK) {
4261 goto fail;
4262 } else {
4263 if (dpl < cpl || dpl < rpl)
4264 goto fail;
4265 if (!(e2 & DESC_W_MASK)) {
4266 fail:
4267 CC_SRC = eflags & ~CC_Z;
4268 return;
4269 }
4270 }
4271 CC_SRC = eflags | CC_Z;
4272}
4273
4274/* x87 FPU helpers */
4275
4276static void fpu_set_exception(int mask)
4277{
4278 env->fpus |= mask;
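    /* If any pending exception is unmasked in the control word, also set the
       error-summary and busy bits in the status word. */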
4279 if (env->fpus & (~env->fpuc & FPUC_EM))
4280 env->fpus |= FPUS_SE | FPUS_B;
4281}
4282
4283static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
4284{
4285 if (b == 0.0)
4286 fpu_set_exception(FPUS_ZE);
4287 return a / b;
4288}
4289
4290static void fpu_raise_exception(void)
4291{
4292 if (env->cr[0] & CR0_NE_MASK) {
4293 raise_exception(EXCP10_COPR);
4294 }
4295#if !defined(CONFIG_USER_ONLY)
4296 else {
4297 cpu_set_ferr(env);
4298 }
4299#endif
4300}
4301
4302void helper_flds_FT0(uint32_t val)
4303{
4304 union {
4305 float32 f;
4306 uint32_t i;
4307 } u;
4308 u.i = val;
4309 FT0 = float32_to_floatx(u.f, &env->fp_status);
4310}
4311
4312void helper_fldl_FT0(uint64_t val)
4313{
4314 union {
4315 float64 f;
4316 uint64_t i;
4317 } u;
4318 u.i = val;
4319 FT0 = float64_to_floatx(u.f, &env->fp_status);
4320}
4321
4322void helper_fildl_FT0(int32_t val)
4323{
4324 FT0 = int32_to_floatx(val, &env->fp_status);
4325}
4326
4327void helper_flds_ST0(uint32_t val)
4328{
4329 int new_fpstt;
4330 union {
4331 float32 f;
4332 uint32_t i;
4333 } u;
4334 new_fpstt = (env->fpstt - 1) & 7;
4335 u.i = val;
4336 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
4337 env->fpstt = new_fpstt;
4338 env->fptags[new_fpstt] = 0; /* validate stack entry */
4339}
4340
4341void helper_fldl_ST0(uint64_t val)
4342{
4343 int new_fpstt;
4344 union {
4345 float64 f;
4346 uint64_t i;
4347 } u;
4348 new_fpstt = (env->fpstt - 1) & 7;
4349 u.i = val;
4350 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
4351 env->fpstt = new_fpstt;
4352 env->fptags[new_fpstt] = 0; /* validate stack entry */
4353}
4354
4355void helper_fildl_ST0(int32_t val)
4356{
4357 int new_fpstt;
4358 new_fpstt = (env->fpstt - 1) & 7;
4359 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
4360 env->fpstt = new_fpstt;
4361 env->fptags[new_fpstt] = 0; /* validate stack entry */
4362}
4363
4364void helper_fildll_ST0(int64_t val)
4365{
4366 int new_fpstt;
4367 new_fpstt = (env->fpstt - 1) & 7;
4368 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
4369 env->fpstt = new_fpstt;
4370 env->fptags[new_fpstt] = 0; /* validate stack entry */
4371}
4372
4373#ifndef VBOX
4374uint32_t helper_fsts_ST0(void)
4375#else
4376RTCCUINTREG helper_fsts_ST0(void)
4377#endif
4378{
4379 union {
4380 float32 f;
4381 uint32_t i;
4382 } u;
4383 u.f = floatx_to_float32(ST0, &env->fp_status);
4384 return u.i;
4385}
4386
4387uint64_t helper_fstl_ST0(void)
4388{
4389 union {
4390 float64 f;
4391 uint64_t i;
4392 } u;
4393 u.f = floatx_to_float64(ST0, &env->fp_status);
4394 return u.i;
4395}
4396
4397#ifndef VBOX
4398int32_t helper_fist_ST0(void)
4399#else
4400RTCCINTREG helper_fist_ST0(void)
4401#endif
4402{
4403 int32_t val;
4404 val = floatx_to_int32(ST0, &env->fp_status);
4405 if (val != (int16_t)val)
4406 val = -32768;
4407 return val;
4408}
4409
4410#ifndef VBOX
4411int32_t helper_fistl_ST0(void)
4412#else
4413RTCCINTREG helper_fistl_ST0(void)
4414#endif
4415{
4416 int32_t val;
4417 val = floatx_to_int32(ST0, &env->fp_status);
4418 return val;
4419}
4420
4421int64_t helper_fistll_ST0(void)
4422{
4423 int64_t val;
4424 val = floatx_to_int64(ST0, &env->fp_status);
4425 return val;
4426}
4427
4428#ifndef VBOX
4429int32_t helper_fistt_ST0(void)
4430#else
4431RTCCINTREG helper_fistt_ST0(void)
4432#endif
4433{
4434 int32_t val;
4435 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4436 if (val != (int16_t)val)
4437 val = -32768;
4438 return val;
4439}
4440
4441#ifndef VBOX
4442int32_t helper_fisttl_ST0(void)
4443#else
4444RTCCINTREG helper_fisttl_ST0(void)
4445#endif
4446{
4447 int32_t val;
4448 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
4449 return val;
4450}
4451
4452int64_t helper_fisttll_ST0(void)
4453{
4454 int64_t val;
4455 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
4456 return val;
4457}
4458
4459void helper_fldt_ST0(target_ulong ptr)
4460{
4461 int new_fpstt;
4462 new_fpstt = (env->fpstt - 1) & 7;
4463 env->fpregs[new_fpstt].d = helper_fldt(ptr);
4464 env->fpstt = new_fpstt;
4465 env->fptags[new_fpstt] = 0; /* validate stack entry */
4466}
4467
4468void helper_fstt_ST0(target_ulong ptr)
4469{
4470 helper_fstt(ST0, ptr);
4471}
4472
4473void helper_fpush(void)
4474{
4475 fpush();
4476}
4477
4478void helper_fpop(void)
4479{
4480 fpop();
4481}
4482
4483void helper_fdecstp(void)
4484{
4485 env->fpstt = (env->fpstt - 1) & 7;
4486 env->fpus &= (~0x4700);
4487}
4488
4489void helper_fincstp(void)
4490{
4491 env->fpstt = (env->fpstt + 1) & 7;
4492 env->fpus &= (~0x4700);
4493}
4494
4495/* FPU move */
4496
4497void helper_ffree_STN(int st_index)
4498{
4499 env->fptags[(env->fpstt + st_index) & 7] = 1;
4500}
4501
4502void helper_fmov_ST0_FT0(void)
4503{
4504 ST0 = FT0;
4505}
4506
4507void helper_fmov_FT0_STN(int st_index)
4508{
4509 FT0 = ST(st_index);
4510}
4511
4512void helper_fmov_ST0_STN(int st_index)
4513{
4514 ST0 = ST(st_index);
4515}
4516
4517void helper_fmov_STN_ST0(int st_index)
4518{
4519 ST(st_index) = ST0;
4520}
4521
4522void helper_fxchg_ST0_STN(int st_index)
4523{
4524 CPU86_LDouble tmp;
4525 tmp = ST(st_index);
4526 ST(st_index) = ST0;
4527 ST0 = tmp;
4528}
4529
4530/* FPU operations */
4531
4532static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
4533
4534void helper_fcom_ST0_FT0(void)
4535{
4536 int ret;
4537
4538 ret = floatx_compare(ST0, FT0, &env->fp_status);
4539 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4540}
4541
4542void helper_fucom_ST0_FT0(void)
4543{
4544 int ret;
4545
4546 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4547    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
4548}
4549
4550static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
4551
4552void helper_fcomi_ST0_FT0(void)
4553{
4554 int eflags;
4555 int ret;
4556
4557 ret = floatx_compare(ST0, FT0, &env->fp_status);
4558 eflags = helper_cc_compute_all(CC_OP);
4559 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4560 CC_SRC = eflags;
4561}
4562
4563void helper_fucomi_ST0_FT0(void)
4564{
4565 int eflags;
4566 int ret;
4567
4568 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
4569 eflags = helper_cc_compute_all(CC_OP);
4570 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
4571 CC_SRC = eflags;
4572}
4573
4574void helper_fadd_ST0_FT0(void)
4575{
4576 ST0 += FT0;
4577}
4578
4579void helper_fmul_ST0_FT0(void)
4580{
4581 ST0 *= FT0;
4582}
4583
4584void helper_fsub_ST0_FT0(void)
4585{
4586 ST0 -= FT0;
4587}
4588
4589void helper_fsubr_ST0_FT0(void)
4590{
4591 ST0 = FT0 - ST0;
4592}
4593
4594void helper_fdiv_ST0_FT0(void)
4595{
4596 ST0 = helper_fdiv(ST0, FT0);
4597}
4598
4599void helper_fdivr_ST0_FT0(void)
4600{
4601 ST0 = helper_fdiv(FT0, ST0);
4602}
4603
4604/* fp operations between STN and ST0 */
4605
4606void helper_fadd_STN_ST0(int st_index)
4607{
4608 ST(st_index) += ST0;
4609}
4610
4611void helper_fmul_STN_ST0(int st_index)
4612{
4613 ST(st_index) *= ST0;
4614}
4615
4616void helper_fsub_STN_ST0(int st_index)
4617{
4618 ST(st_index) -= ST0;
4619}
4620
4621void helper_fsubr_STN_ST0(int st_index)
4622{
4623 CPU86_LDouble *p;
4624 p = &ST(st_index);
4625 *p = ST0 - *p;
4626}
4627
4628void helper_fdiv_STN_ST0(int st_index)
4629{
4630 CPU86_LDouble *p;
4631 p = &ST(st_index);
4632 *p = helper_fdiv(*p, ST0);
4633}
4634
4635void helper_fdivr_STN_ST0(int st_index)
4636{
4637 CPU86_LDouble *p;
4638 p = &ST(st_index);
4639 *p = helper_fdiv(ST0, *p);
4640}
4641
4642/* misc FPU operations */
4643void helper_fchs_ST0(void)
4644{
4645 ST0 = floatx_chs(ST0);
4646}
4647
4648void helper_fabs_ST0(void)
4649{
4650 ST0 = floatx_abs(ST0);
4651}
4652
4653void helper_fld1_ST0(void)
4654{
4655 ST0 = f15rk[1];
4656}
4657
4658void helper_fldl2t_ST0(void)
4659{
4660 ST0 = f15rk[6];
4661}
4662
4663void helper_fldl2e_ST0(void)
4664{
4665 ST0 = f15rk[5];
4666}
4667
4668void helper_fldpi_ST0(void)
4669{
4670 ST0 = f15rk[2];
4671}
4672
4673void helper_fldlg2_ST0(void)
4674{
4675 ST0 = f15rk[3];
4676}
4677
4678void helper_fldln2_ST0(void)
4679{
4680 ST0 = f15rk[4];
4681}
4682
4683void helper_fldz_ST0(void)
4684{
4685 ST0 = f15rk[0];
4686}
4687
4688void helper_fldz_FT0(void)
4689{
4690 FT0 = f15rk[0];
4691}
4692
4693#ifndef VBOX
4694uint32_t helper_fnstsw(void)
4695#else
4696RTCCUINTREG helper_fnstsw(void)
4697#endif
4698{
4699 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4700}
4701
4702#ifndef VBOX
4703uint32_t helper_fnstcw(void)
4704#else
4705RTCCUINTREG helper_fnstcw(void)
4706#endif
4707{
4708 return env->fpuc;
4709}
4710
4711static void update_fp_status(void)
4712{
4713 int rnd_type;
4714
4715 /* set rounding mode */
4716 switch(env->fpuc & RC_MASK) {
4717 default:
4718 case RC_NEAR:
4719 rnd_type = float_round_nearest_even;
4720 break;
4721 case RC_DOWN:
4722 rnd_type = float_round_down;
4723 break;
4724 case RC_UP:
4725 rnd_type = float_round_up;
4726 break;
4727 case RC_CHOP:
4728 rnd_type = float_round_to_zero;
4729 break;
4730 }
4731 set_float_rounding_mode(rnd_type, &env->fp_status);
4732#ifdef FLOATX80
4733 switch((env->fpuc >> 8) & 3) {
4734 case 0:
4735 rnd_type = 32;
4736 break;
4737 case 2:
4738 rnd_type = 64;
4739 break;
4740 case 3:
4741 default:
4742 rnd_type = 80;
4743 break;
4744 }
4745 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4746#endif
4747}
4748
4749void helper_fldcw(uint32_t val)
4750{
4751 env->fpuc = val;
4752 update_fp_status();
4753}
4754
4755void helper_fclex(void)
4756{
4757 env->fpus &= 0x7f00;
4758}
4759
4760void helper_fwait(void)
4761{
4762 if (env->fpus & FPUS_SE)
4763 fpu_raise_exception();
4764}
4765
4766void helper_fninit(void)
4767{
4768 env->fpus = 0;
4769 env->fpstt = 0;
4770 env->fpuc = 0x37f;
4771 env->fptags[0] = 1;
4772 env->fptags[1] = 1;
4773 env->fptags[2] = 1;
4774 env->fptags[3] = 1;
4775 env->fptags[4] = 1;
4776 env->fptags[5] = 1;
4777 env->fptags[6] = 1;
4778 env->fptags[7] = 1;
4779}
4780
4781/* BCD ops */
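/* FBLD/FBST use a 10-byte packed BCD operand: 18 decimal digits, two per byte
   starting with the least significant byte, and the sign in bit 7 of byte 9. */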
4782
4783void helper_fbld_ST0(target_ulong ptr)
4784{
4785 CPU86_LDouble tmp;
4786 uint64_t val;
4787 unsigned int v;
4788 int i;
4789
4790 val = 0;
4791 for(i = 8; i >= 0; i--) {
4792 v = ldub(ptr + i);
4793 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4794 }
4795 tmp = val;
4796 if (ldub(ptr + 9) & 0x80)
4797 tmp = -tmp;
4798 fpush();
4799 ST0 = tmp;
4800}
4801
4802void helper_fbst_ST0(target_ulong ptr)
4803{
4804 int v;
4805 target_ulong mem_ref, mem_end;
4806 int64_t val;
4807
4808 val = floatx_to_int64(ST0, &env->fp_status);
4809 mem_ref = ptr;
4810 mem_end = mem_ref + 9;
4811 if (val < 0) {
4812 stb(mem_end, 0x80);
4813 val = -val;
4814 } else {
4815 stb(mem_end, 0x00);
4816 }
4817 while (mem_ref < mem_end) {
4818 if (val == 0)
4819 break;
4820 v = val % 100;
4821 val = val / 100;
4822 v = ((v / 10) << 4) | (v % 10);
4823 stb(mem_ref++, v);
4824 }
4825 while (mem_ref < mem_end) {
4826 stb(mem_ref++, 0);
4827 }
4828}
4829
4830void helper_f2xm1(void)
4831{
4832 ST0 = pow(2.0,ST0) - 1.0;
4833}
4834
4835void helper_fyl2x(void)
4836{
4837 CPU86_LDouble fptemp;
4838
4839 fptemp = ST0;
4840 if (fptemp>0.0){
4841 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4842 ST1 *= fptemp;
4843 fpop();
4844 } else {
4845 env->fpus &= (~0x4700);
4846 env->fpus |= 0x400;
4847 }
4848}
4849
4850void helper_fptan(void)
4851{
4852 CPU86_LDouble fptemp;
4853
4854 fptemp = ST0;
4855 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4856 env->fpus |= 0x400;
4857 } else {
4858 ST0 = tan(fptemp);
4859 fpush();
4860 ST0 = 1.0;
4861 env->fpus &= (~0x400); /* C2 <-- 0 */
4862 /* the above code is for |arg| < 2**52 only */
4863 }
4864}
4865
4866void helper_fpatan(void)
4867{
4868 CPU86_LDouble fptemp, fpsrcop;
4869
4870 fpsrcop = ST1;
4871 fptemp = ST0;
4872 ST1 = atan2(fpsrcop,fptemp);
4873 fpop();
4874}
4875
4876void helper_fxtract(void)
4877{
4878 CPU86_LDoubleU temp;
4879 unsigned int expdif;
4880
4881 temp.d = ST0;
4882 expdif = EXPD(temp) - EXPBIAS;
4883 /*DP exponent bias*/
4884 ST0 = expdif;
4885 fpush();
4886 BIASEXPONENT(temp);
4887 ST0 = temp.d;
4888}
4889
4890void helper_fprem1(void)
4891{
4892 CPU86_LDouble dblq, fpsrcop, fptemp;
4893 CPU86_LDoubleU fpsrcop1, fptemp1;
4894 int expdif;
4895 signed long long int q;
4896
4897#ifndef VBOX /* Unfortunately, we cannot handle isinf/isnan easily in wrapper */
4898 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4899#else
4900 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4901#endif
4902 ST0 = 0.0 / 0.0; /* NaN */
4903 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4904 return;
4905 }
4906
4907 fpsrcop = ST0;
4908 fptemp = ST1;
4909 fpsrcop1.d = fpsrcop;
4910 fptemp1.d = fptemp;
4911 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4912
4913 if (expdif < 0) {
4914 /* optimisation? taken from the AMD docs */
4915 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4916 /* ST0 is unchanged */
4917 return;
4918 }
4919
4920 if (expdif < 53) {
4921 dblq = fpsrcop / fptemp;
4922 /* round dblq towards nearest integer */
4923 dblq = rint(dblq);
4924 ST0 = fpsrcop - fptemp * dblq;
4925
4926 /* convert dblq to q by truncating towards zero */
4927 if (dblq < 0.0)
4928 q = (signed long long int)(-dblq);
4929 else
4930 q = (signed long long int)dblq;
4931
4932 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4933 /* (C0,C3,C1) <-- (q2,q1,q0) */
4934 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4935 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4936 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4937 } else {
4938 env->fpus |= 0x400; /* C2 <-- 1 */
4939 fptemp = pow(2.0, expdif - 50);
4940 fpsrcop = (ST0 / ST1) / fptemp;
4941 /* fpsrcop = integer obtained by chopping */
4942 fpsrcop = (fpsrcop < 0.0) ?
4943 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4944 ST0 -= (ST1 * fpsrcop * fptemp);
4945 }
4946}
4947
4948void helper_fprem(void)
4949{
4950 CPU86_LDouble dblq, fpsrcop, fptemp;
4951 CPU86_LDoubleU fpsrcop1, fptemp1;
4952 int expdif;
4953 signed long long int q;
4954
4955#ifndef VBOX /* Unfortunately, we cannot easily handle isinf/isnan in wrapper */
4956 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4957#else
4958 if ((ST0 != ST0) || (ST1 != ST1) || (ST1 == 0.0)) {
4959#endif
4960 ST0 = 0.0 / 0.0; /* NaN */
4961 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4962 return;
4963 }
4964
4965 fpsrcop = (CPU86_LDouble)ST0;
4966 fptemp = (CPU86_LDouble)ST1;
4967 fpsrcop1.d = fpsrcop;
4968 fptemp1.d = fptemp;
4969 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4970
4971 if (expdif < 0) {
4972 /* optimisation? taken from the AMD docs */
4973 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4974 /* ST0 is unchanged */
4975 return;
4976 }
4977
4978 if ( expdif < 53 ) {
4979 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4980 /* round dblq towards zero */
4981 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4982 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4983
4984 /* convert dblq to q by truncating towards zero */
4985 if (dblq < 0.0)
4986 q = (signed long long int)(-dblq);
4987 else
4988 q = (signed long long int)dblq;
4989
4990 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4991 /* (C0,C3,C1) <-- (q2,q1,q0) */
4992 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4993 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4994 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4995 } else {
4996 int N = 32 + (expdif % 32); /* as per AMD docs */
4997 env->fpus |= 0x400; /* C2 <-- 1 */
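        /* The exponent difference is too large for a single step: perform a
           partial reduction by 2^(expdif - N) and leave C2 set so software
           re-executes FPREM until the reduction is complete. */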
4998 fptemp = pow(2.0, (double)(expdif - N));
4999 fpsrcop = (ST0 / ST1) / fptemp;
5000 /* fpsrcop = integer obtained by chopping */
5001 fpsrcop = (fpsrcop < 0.0) ?
5002 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
5003 ST0 -= (ST1 * fpsrcop * fptemp);
5004 }
5005}
5006
5007void helper_fyl2xp1(void)
5008{
5009 CPU86_LDouble fptemp;
5010
5011 fptemp = ST0;
5012 if ((fptemp+1.0)>0.0) {
5013 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
5014 ST1 *= fptemp;
5015 fpop();
5016 } else {
5017 env->fpus &= (~0x4700);
5018 env->fpus |= 0x400;
5019 }
5020}
5021
5022void helper_fsqrt(void)
5023{
5024 CPU86_LDouble fptemp;
5025
5026 fptemp = ST0;
5027 if (fptemp<0.0) {
5028 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
5029 env->fpus |= 0x400;
5030 }
5031 ST0 = sqrt(fptemp);
5032}
5033
5034void helper_fsincos(void)
5035{
5036 CPU86_LDouble fptemp;
5037
5038 fptemp = ST0;
5039 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5040 env->fpus |= 0x400;
5041 } else {
5042 ST0 = sin(fptemp);
5043 fpush();
5044 ST0 = cos(fptemp);
5045 env->fpus &= (~0x400); /* C2 <-- 0 */
5046 /* the above code is for |arg| < 2**63 only */
5047 }
5048}
5049
5050void helper_frndint(void)
5051{
5052 ST0 = floatx_round_to_int(ST0, &env->fp_status);
5053}
5054
5055void helper_fscale(void)
5056{
5057 ST0 = ldexp (ST0, (int)(ST1));
5058}
5059
5060void helper_fsin(void)
5061{
5062 CPU86_LDouble fptemp;
5063
5064 fptemp = ST0;
5065 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5066 env->fpus |= 0x400;
5067 } else {
5068 ST0 = sin(fptemp);
5069 env->fpus &= (~0x400); /* C2 <-- 0 */
5070 /* the above code is for |arg| < 2**53 only */
5071 }
5072}
5073
5074void helper_fcos(void)
5075{
5076 CPU86_LDouble fptemp;
5077
5078 fptemp = ST0;
5079 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
5080 env->fpus |= 0x400;
5081 } else {
5082 ST0 = cos(fptemp);
5083 env->fpus &= (~0x400); /* C2 <-- 0 */
5084        /* the above code is for |arg| < 2**63 only */
5085 }
5086}
5087
5088void helper_fxam_ST0(void)
5089{
5090 CPU86_LDoubleU temp;
5091 int expdif;
5092
5093 temp.d = ST0;
5094
5095 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
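    /* FXAM encodes the class of ST0 (NaN, infinity, normal, zero, denormal)
       in C3/C2/C0 and mirrors its sign in C1. */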
5096 if (SIGND(temp))
5097 env->fpus |= 0x200; /* C1 <-- 1 */
5098
5099 /* XXX: test fptags too */
5100 expdif = EXPD(temp);
5101 if (expdif == MAXEXPD) {
5102#ifdef USE_X86LDOUBLE
5103 if (MANTD(temp) == 0x8000000000000000ULL)
5104#else
5105 if (MANTD(temp) == 0)
5106#endif
5107 env->fpus |= 0x500 /*Infinity*/;
5108 else
5109 env->fpus |= 0x100 /*NaN*/;
5110 } else if (expdif == 0) {
5111 if (MANTD(temp) == 0)
5112 env->fpus |= 0x4000 /*Zero*/;
5113 else
5114 env->fpus |= 0x4400 /*Denormal*/;
5115 } else {
5116 env->fpus |= 0x400;
5117 }
5118}
5119
5120void helper_fstenv(target_ulong ptr, int data32)
5121{
5122 int fpus, fptag, exp, i;
5123 uint64_t mant;
5124 CPU86_LDoubleU tmp;
5125
5126 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5127 fptag = 0;
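    /* Rebuild the full x87 tag word, two bits per register:
       00 = valid, 01 = zero, 10 = special (NaN/Inf/denormal), 11 = empty. */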
5128 for (i=7; i>=0; i--) {
5129 fptag <<= 2;
5130 if (env->fptags[i]) {
5131 fptag |= 3;
5132 } else {
5133 tmp.d = env->fpregs[i].d;
5134 exp = EXPD(tmp);
5135 mant = MANTD(tmp);
5136 if (exp == 0 && mant == 0) {
5137 /* zero */
5138 fptag |= 1;
5139 } else if (exp == 0 || exp == MAXEXPD
5140#ifdef USE_X86LDOUBLE
5141 || (mant & (1LL << 63)) == 0
5142#endif
5143 ) {
5144 /* NaNs, infinity, denormal */
5145 fptag |= 2;
5146 }
5147 }
5148 }
5149 if (data32) {
5150 /* 32 bit */
5151 stl(ptr, env->fpuc);
5152 stl(ptr + 4, fpus);
5153 stl(ptr + 8, fptag);
5154 stl(ptr + 12, 0); /* fpip */
5155 stl(ptr + 16, 0); /* fpcs */
5156 stl(ptr + 20, 0); /* fpoo */
5157 stl(ptr + 24, 0); /* fpos */
5158 } else {
5159 /* 16 bit */
5160 stw(ptr, env->fpuc);
5161 stw(ptr + 2, fpus);
5162 stw(ptr + 4, fptag);
5163 stw(ptr + 6, 0);
5164 stw(ptr + 8, 0);
5165 stw(ptr + 10, 0);
5166 stw(ptr + 12, 0);
5167 }
5168}
5169
5170void helper_fldenv(target_ulong ptr, int data32)
5171{
5172 int i, fpus, fptag;
5173
5174 if (data32) {
5175 env->fpuc = lduw(ptr);
5176 fpus = lduw(ptr + 4);
5177 fptag = lduw(ptr + 8);
5178 }
5179 else {
5180 env->fpuc = lduw(ptr);
5181 fpus = lduw(ptr + 2);
5182 fptag = lduw(ptr + 4);
5183 }
5184 env->fpstt = (fpus >> 11) & 7;
5185 env->fpus = fpus & ~0x3800;
5186 for(i = 0;i < 8; i++) {
5187 env->fptags[i] = ((fptag & 3) == 3);
5188 fptag >>= 2;
5189 }
5190}
5191
5192void helper_fsave(target_ulong ptr, int data32)
5193{
5194 CPU86_LDouble tmp;
5195 int i;
5196
5197 helper_fstenv(ptr, data32);
5198
5199 ptr += (14 << data32);
5200 for(i = 0;i < 8; i++) {
5201 tmp = ST(i);
5202 helper_fstt(tmp, ptr);
5203 ptr += 10;
5204 }
5205
5206 /* fninit */
5207 env->fpus = 0;
5208 env->fpstt = 0;
5209 env->fpuc = 0x37f;
5210 env->fptags[0] = 1;
5211 env->fptags[1] = 1;
5212 env->fptags[2] = 1;
5213 env->fptags[3] = 1;
5214 env->fptags[4] = 1;
5215 env->fptags[5] = 1;
5216 env->fptags[6] = 1;
5217 env->fptags[7] = 1;
5218}
5219
5220void helper_frstor(target_ulong ptr, int data32)
5221{
5222 CPU86_LDouble tmp;
5223 int i;
5224
5225 helper_fldenv(ptr, data32);
5226 ptr += (14 << data32);
5227
5228 for(i = 0;i < 8; i++) {
5229 tmp = helper_fldt(ptr);
5230 ST(i) = tmp;
5231 ptr += 10;
5232 }
5233}
5234
5235void helper_fxsave(target_ulong ptr, int data64)
5236{
5237 int fpus, fptag, i, nb_xmm_regs;
5238 CPU86_LDouble tmp;
5239 target_ulong addr;
5240
5241 /* The operand must be 16 byte aligned */
5242 if (ptr & 0xf) {
5243 raise_exception(EXCP0D_GPF);
5244 }
5245
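    /* FXSAVE image layout used below (offsets from ptr): +0x00 FCW, +0x02 FSW,
       +0x04 abridged tag byte, +0x18 MXCSR, +0x20 ST(0)..ST(7) in 16-byte slots,
       +0xa0 XMM registers in 16-byte slots. */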
5246 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
5247 fptag = 0;
5248 for(i = 0; i < 8; i++) {
5249 fptag |= (env->fptags[i] << i);
5250 }
5251 stw(ptr, env->fpuc);
5252 stw(ptr + 2, fpus);
5253 stw(ptr + 4, fptag ^ 0xff);
5254#ifdef TARGET_X86_64
5255 if (data64) {
5256 stq(ptr + 0x08, 0); /* rip */
5257 stq(ptr + 0x10, 0); /* rdp */
5258 } else
5259#endif
5260 {
5261 stl(ptr + 0x08, 0); /* eip */
5262 stl(ptr + 0x0c, 0); /* sel */
5263 stl(ptr + 0x10, 0); /* dp */
5264 stl(ptr + 0x14, 0); /* sel */
5265 }
5266
5267 addr = ptr + 0x20;
5268 for(i = 0;i < 8; i++) {
5269 tmp = ST(i);
5270 helper_fstt(tmp, addr);
5271 addr += 16;
5272 }
5273
5274 if (env->cr[4] & CR4_OSFXSR_MASK) {
5275 /* XXX: finish it */
5276 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
5277 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
5278 if (env->hflags & HF_CS64_MASK)
5279 nb_xmm_regs = 16;
5280 else
5281 nb_xmm_regs = 8;
5282 addr = ptr + 0xa0;
5283 /* Fast FXSAVE leaves out the XMM registers */
5284 if (!(env->efer & MSR_EFER_FFXSR)
5285 || (env->hflags & HF_CPL_MASK)
5286 || !(env->hflags & HF_LMA_MASK)) {
5287 for(i = 0; i < nb_xmm_regs; i++) {
5288 stq(addr, env->xmm_regs[i].XMM_Q(0));
5289 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
5290 addr += 16;
5291 }
5292 }
5293 }
5294}
5295
5296void helper_fxrstor(target_ulong ptr, int data64)
5297{
5298 int i, fpus, fptag, nb_xmm_regs;
5299 CPU86_LDouble tmp;
5300 target_ulong addr;
5301
5302 /* The operand must be 16 byte aligned */
5303 if (ptr & 0xf) {
5304 raise_exception(EXCP0D_GPF);
5305 }
5306
5307 env->fpuc = lduw(ptr);
5308 fpus = lduw(ptr + 2);
5309 fptag = lduw(ptr + 4);
5310 env->fpstt = (fpus >> 11) & 7;
5311 env->fpus = fpus & ~0x3800;
5312 fptag ^= 0xff;
5313 for(i = 0;i < 8; i++) {
5314 env->fptags[i] = ((fptag >> i) & 1);
5315 }
5316
5317 addr = ptr + 0x20;
5318 for(i = 0;i < 8; i++) {
5319 tmp = helper_fldt(addr);
5320 ST(i) = tmp;
5321 addr += 16;
5322 }
5323
5324 if (env->cr[4] & CR4_OSFXSR_MASK) {
5325 /* XXX: finish it */
5326 env->mxcsr = ldl(ptr + 0x18);
5327 //ldl(ptr + 0x1c);
5328 if (env->hflags & HF_CS64_MASK)
5329 nb_xmm_regs = 16;
5330 else
5331 nb_xmm_regs = 8;
5332 addr = ptr + 0xa0;
5333 /* Fast FXRESTORE leaves out the XMM registers */
5334 if (!(env->efer & MSR_EFER_FFXSR)
5335 || (env->hflags & HF_CPL_MASK)
5336 || !(env->hflags & HF_LMA_MASK)) {
5337 for(i = 0; i < nb_xmm_regs; i++) {
5338#if !defined(VBOX) || __GNUC__ < 4
5339 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
5340 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
5341#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
5342# if 1
5343 env->xmm_regs[i].XMM_L(0) = ldl(addr);
5344 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
5345 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
5346 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
5347# else
5348 /* this works fine on Mac OS X, gcc 4.0.1 */
5349 uint64_t u64 = ldq(addr);
5350                env->xmm_regs[i].XMM_Q(0) = u64;
5351                u64 = ldq(addr + 8);
5352 env->xmm_regs[i].XMM_Q(1) = u64;
5353# endif
5354#endif
5355 addr += 16;
5356 }
5357 }
5358 }
5359}
5360
5361#ifndef USE_X86LDOUBLE
5362
5363void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5364{
5365 CPU86_LDoubleU temp;
5366 int e;
5367
5368 temp.d = f;
5369 /* mantissa */
5370 *pmant = (MANTD(temp) << 11) | (1LL << 63);
5371 /* exponent + sign */
5372 e = EXPD(temp) - EXPBIAS + 16383;
5373 e |= SIGND(temp) >> 16;
5374 *pexp = e;
5375}
5376
5377CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5378{
5379 CPU86_LDoubleU temp;
5380 int e;
5381 uint64_t ll;
5382
5383 /* XXX: handle overflow ? */
5384 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
5385 e |= (upper >> 4) & 0x800; /* sign */
5386 ll = (mant >> 11) & ((1LL << 52) - 1);
5387#ifdef __arm__
5388 temp.l.upper = (e << 20) | (ll >> 32);
5389 temp.l.lower = ll;
5390#else
5391 temp.ll = ll | ((uint64_t)e << 52);
5392#endif
5393 return temp.d;
5394}
5395
5396#else
5397
5398void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
5399{
5400 CPU86_LDoubleU temp;
5401
5402 temp.d = f;
5403 *pmant = temp.l.lower;
5404 *pexp = temp.l.upper;
5405}
5406
5407CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
5408{
5409 CPU86_LDoubleU temp;
5410
5411 temp.l.upper = upper;
5412 temp.l.lower = mant;
5413 return temp.d;
5414}
5415#endif
5416
5417#ifdef TARGET_X86_64
5418
5419//#define DEBUG_MULDIV
5420
5421static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
5422{
5423 *plow += a;
5424 /* carry test */
5425 if (*plow < a)
5426 (*phigh)++;
5427 *phigh += b;
5428}
5429
5430static void neg128(uint64_t *plow, uint64_t *phigh)
5431{
5432 *plow = ~ *plow;
5433 *phigh = ~ *phigh;
5434 add128(plow, phigh, 1, 0);
5435}
5436
5437/* return TRUE if overflow */
5438static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
5439{
5440 uint64_t q, r, a1, a0;
5441 int i, qb, ab;
5442
5443 a0 = *plow;
5444 a1 = *phigh;
5445 if (a1 == 0) {
5446 q = a0 / b;
5447 r = a0 % b;
5448 *plow = q;
5449 *phigh = r;
5450 } else {
5451 if (a1 >= b)
5452 return 1;
5453 /* XXX: use a better algorithm */
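        /* Simple shift-and-subtract division: shift the 128-bit dividend left
           one bit at a time, subtracting b whenever it fits; after 64 iterations
           a0 holds the quotient and a1 the remainder. */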
5454 for(i = 0; i < 64; i++) {
5455 ab = a1 >> 63;
5456 a1 = (a1 << 1) | (a0 >> 63);
5457 if (ab || a1 >= b) {
5458 a1 -= b;
5459 qb = 1;
5460 } else {
5461 qb = 0;
5462 }
5463 a0 = (a0 << 1) | qb;
5464 }
5465#if defined(DEBUG_MULDIV)
5466 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
5467 *phigh, *plow, b, a0, a1);
5468#endif
5469 *plow = a0;
5470 *phigh = a1;
5471 }
5472 return 0;
5473}
5474
5475/* return TRUE if overflow */
5476static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
5477{
5478 int sa, sb;
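    /* Divide the magnitudes and fix up the signs afterwards; report overflow
       if the signed quotient does not fit in 64 bits. */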
5479 sa = ((int64_t)*phigh < 0);
5480 if (sa)
5481 neg128(plow, phigh);
5482 sb = (b < 0);
5483 if (sb)
5484 b = -b;
5485 if (div64(plow, phigh, b) != 0)
5486 return 1;
5487 if (sa ^ sb) {
5488 if (*plow > (1ULL << 63))
5489 return 1;
5490 *plow = - *plow;
5491 } else {
5492 if (*plow >= (1ULL << 63))
5493 return 1;
5494 }
5495 if (sa)
5496 *phigh = - *phigh;
5497 return 0;
5498}
5499
5500void helper_mulq_EAX_T0(target_ulong t0)
5501{
5502 uint64_t r0, r1;
5503
5504 mulu64(&r0, &r1, EAX, t0);
5505 EAX = r0;
5506 EDX = r1;
5507 CC_DST = r0;
5508 CC_SRC = r1;
5509}
5510
5511void helper_imulq_EAX_T0(target_ulong t0)
5512{
5513 uint64_t r0, r1;
5514
5515 muls64(&r0, &r1, EAX, t0);
5516 EAX = r0;
5517 EDX = r1;
5518 CC_DST = r0;
5519 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5520}
5521
5522target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
5523{
5524 uint64_t r0, r1;
5525
5526 muls64(&r0, &r1, t0, t1);
5527 CC_DST = r0;
5528 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
5529 return r0;
5530}
5531
5532void helper_divq_EAX(target_ulong t0)
5533{
5534 uint64_t r0, r1;
5535 if (t0 == 0) {
5536 raise_exception(EXCP00_DIVZ);
5537 }
5538 r0 = EAX;
5539 r1 = EDX;
5540 if (div64(&r0, &r1, t0))
5541 raise_exception(EXCP00_DIVZ);
5542 EAX = r0;
5543 EDX = r1;
5544}
5545
5546void helper_idivq_EAX(target_ulong t0)
5547{
5548 uint64_t r0, r1;
5549 if (t0 == 0) {
5550 raise_exception(EXCP00_DIVZ);
5551 }
5552 r0 = EAX;
5553 r1 = EDX;
5554 if (idiv64(&r0, &r1, t0))
5555 raise_exception(EXCP00_DIVZ);
5556 EAX = r0;
5557 EDX = r1;
5558}
5559#endif
5560
5561static void do_hlt(void)
5562{
5563 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
5564 env->halted = 1;
5565 env->exception_index = EXCP_HLT;
5566 cpu_loop_exit();
5567}
5568
5569void helper_hlt(int next_eip_addend)
5570{
5571 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
5572 EIP += next_eip_addend;
5573
5574 do_hlt();
5575}
5576
5577void helper_monitor(target_ulong ptr)
5578{
5579#ifdef VBOX
5580 if ((uint32_t)ECX > 1)
5581 raise_exception(EXCP0D_GPF);
5582#else /* !VBOX */
5583 if ((uint32_t)ECX != 0)
5584 raise_exception(EXCP0D_GPF);
5585#endif /* !VBOX */
5586 /* XXX: store address ? */
5587 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
5588}
5589
5590void helper_mwait(int next_eip_addend)
5591{
5592 if ((uint32_t)ECX != 0)
5593 raise_exception(EXCP0D_GPF);
5594#ifdef VBOX
5595 helper_hlt(next_eip_addend);
5596#else /* !VBOX */
5597 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
5598 EIP += next_eip_addend;
5599
5600 /* XXX: not complete but not completely erroneous */
5601 if (env->cpu_index != 0 || env->next_cpu != NULL) {
5602 /* more than one CPU: do not sleep because another CPU may
5603 wake this one */
5604 } else {
5605 do_hlt();
5606 }
5607#endif /* !VBOX */
5608}
5609
5610void helper_debug(void)
5611{
5612 env->exception_index = EXCP_DEBUG;
5613 cpu_loop_exit();
5614}
5615
5616void helper_reset_rf(void)
5617{
5618 env->eflags &= ~RF_MASK;
5619}
5620
5621void helper_raise_interrupt(int intno, int next_eip_addend)
5622{
5623 raise_interrupt(intno, 1, 0, next_eip_addend);
5624}
5625
5626void helper_raise_exception(int exception_index)
5627{
5628 raise_exception(exception_index);
5629}
5630
5631void helper_cli(void)
5632{
5633 env->eflags &= ~IF_MASK;
5634}
5635
5636void helper_sti(void)
5637{
5638 env->eflags |= IF_MASK;
5639}
5640
5641#ifdef VBOX
5642void helper_cli_vme(void)
5643{
5644 env->eflags &= ~VIF_MASK;
5645}
5646
5647void helper_sti_vme(void)
5648{
5649 /* First check, then change eflags according to the AMD manual */
5650 if (env->eflags & VIP_MASK) {
5651 raise_exception(EXCP0D_GPF);
5652 }
5653 env->eflags |= VIF_MASK;
5654}
5655#endif /* VBOX */
5656
5657#if 0
5658/* vm86plus instructions */
5659void helper_cli_vm(void)
5660{
5661 env->eflags &= ~VIF_MASK;
5662}
5663
5664void helper_sti_vm(void)
5665{
5666 env->eflags |= VIF_MASK;
5667 if (env->eflags & VIP_MASK) {
5668 raise_exception(EXCP0D_GPF);
5669 }
5670}
5671#endif
5672
5673void helper_set_inhibit_irq(void)
5674{
5675 env->hflags |= HF_INHIBIT_IRQ_MASK;
5676}
5677
5678void helper_reset_inhibit_irq(void)
5679{
5680 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5681}
5682
5683void helper_boundw(target_ulong a0, int v)
5684{
5685 int low, high;
5686 low = ldsw(a0);
5687 high = ldsw(a0 + 2);
5688 v = (int16_t)v;
5689 if (v < low || v > high) {
5690 raise_exception(EXCP05_BOUND);
5691 }
5692}
5693
5694void helper_boundl(target_ulong a0, int v)
5695{
5696 int low, high;
5697 low = ldl(a0);
5698 high = ldl(a0 + 4);
5699 if (v < low || v > high) {
5700 raise_exception(EXCP05_BOUND);
5701 }
5702}
5703
5704static float approx_rsqrt(float a)
5705{
5706 return 1.0 / sqrt(a);
5707}
5708
5709static float approx_rcp(float a)
5710{
5711 return 1.0 / a;
5712}
5713
5714#if !defined(CONFIG_USER_ONLY)
5715
5716#define MMUSUFFIX _mmu
5717
5718#define SHIFT 0
5719#include "softmmu_template.h"
5720
5721#define SHIFT 1
5722#include "softmmu_template.h"
5723
5724#define SHIFT 2
5725#include "softmmu_template.h"
5726
5727#define SHIFT 3
5728#include "softmmu_template.h"
5729
5730#endif
5731
5732#if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB)
5733/* This code assumes real physical addresses always fit into a host CPU register,
5734 which is wrong in general, but true for our current use cases. */
5735RTCCUINTREG REGPARM __ldb_vbox_phys(RTCCUINTREG addr)
5736{
5737 return remR3PhysReadS8(addr);
5738}
5739RTCCUINTREG REGPARM __ldub_vbox_phys(RTCCUINTREG addr)
5740{
5741 return remR3PhysReadU8(addr);
5742}
5743void REGPARM __stb_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5744{
5745 remR3PhysWriteU8(addr, val);
5746}
5747RTCCUINTREG REGPARM __ldw_vbox_phys(RTCCUINTREG addr)
5748{
5749 return remR3PhysReadS16(addr);
5750}
5751RTCCUINTREG REGPARM __lduw_vbox_phys(RTCCUINTREG addr)
5752{
5753 return remR3PhysReadU16(addr);
5754}
5755void REGPARM __stw_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5756{
5757 remR3PhysWriteU16(addr, val);
5758}
5759RTCCUINTREG REGPARM __ldl_vbox_phys(RTCCUINTREG addr)
5760{
5761 return remR3PhysReadS32(addr);
5762}
5763RTCCUINTREG REGPARM __ldul_vbox_phys(RTCCUINTREG addr)
5764{
5765 return remR3PhysReadU32(addr);
5766}
5767void REGPARM __stl_vbox_phys(RTCCUINTREG addr, RTCCUINTREG val)
5768{
5769 remR3PhysWriteU32(addr, val);
5770}
5771uint64_t REGPARM __ldq_vbox_phys(RTCCUINTREG addr)
5772{
5773 return remR3PhysReadU64(addr);
5774}
5775void REGPARM __stq_vbox_phys(RTCCUINTREG addr, uint64_t val)
5776{
5777 remR3PhysWriteU64(addr, val);
5778}
5779#endif /* VBOX */
5780
5781#if !defined(CONFIG_USER_ONLY)
5782/* try to fill the TLB and return an exception if error. If retaddr is
5783 NULL, it means that the function was called in C code (i.e. not
5784 from generated code or from helper.c) */
5785/* XXX: fix it to restore all registers */
5786void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5787{
5788 TranslationBlock *tb;
5789 int ret;
5790 uintptr_t pc;
5791 CPUX86State *saved_env;
5792
5793 /* XXX: hack to restore env in all cases, even if not called from
5794 generated code */
5795 saved_env = env;
5796 env = cpu_single_env;
5797
5798 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
5799 if (ret) {
5800 if (retaddr) {
5801 /* now we have a real cpu fault */
5802 pc = (uintptr_t)retaddr;
5803 tb = tb_find_pc(pc);
5804 if (tb) {
5805 /* the PC is inside the translated code. It means that we have
5806 a virtual CPU fault */
5807 cpu_restore_state(tb, env, pc, NULL);
5808 }
5809 }
5810 raise_exception_err(env->exception_index, env->error_code);
5811 }
5812 env = saved_env;
5813}
5814#endif
5815
5816#ifdef VBOX
5817
5818/**
5819 * Correctly computes the eflags.
5820 * @returns eflags.
5821 * @param env1 CPU environment.
5822 */
5823uint32_t raw_compute_eflags(CPUX86State *env1)
5824{
5825 CPUX86State *savedenv = env;
5826 uint32_t efl;
5827 env = env1;
5828 efl = compute_eflags();
5829 env = savedenv;
5830 return efl;
5831}
5832
5833/**
5834 * Reads byte from virtual address in guest memory area.
5835 * XXX: is it working for any addresses? swapped out pages?
5836 * @returns read data byte.
5837 * @param env1 CPU environment.
5838 * @param pvAddr GC Virtual address.
5839 */
5840uint8_t read_byte(CPUX86State *env1, target_ulong addr)
5841{
5842 CPUX86State *savedenv = env;
5843 uint8_t u8;
5844 env = env1;
5845 u8 = ldub_kernel(addr);
5846 env = savedenv;
5847 return u8;
5848}
5849
5850/**
5851 * Reads a 16-bit word from a virtual address in the guest memory area.
5852 * XXX: is it working for any addresses? swapped out pages?
5853 * @returns read data word.
5854 * @param env1 CPU environment.
5855 * @param pvAddr GC Virtual address.
5856 */
5857uint16_t read_word(CPUX86State *env1, target_ulong addr)
5858{
5859 CPUX86State *savedenv = env;
5860 uint16_t u16;
5861 env = env1;
5862 u16 = lduw_kernel(addr);
5863 env = savedenv;
5864 return u16;
5865}
5866
5867/**
5868 * Reads a 32-bit dword from a virtual address in the guest memory area.
5869 * XXX: is it working for any addresses? swapped out pages?
5870 * @returns read data dword.
5871 * @param env1 CPU environment.
5872 * @param pvAddr GC Virtual address.
5873 */
5874uint32_t read_dword(CPUX86State *env1, target_ulong addr)
5875{
5876 CPUX86State *savedenv = env;
5877 uint32_t u32;
5878 env = env1;
5879 u32 = ldl_kernel(addr);
5880 env = savedenv;
5881 return u32;
5882}
5883
5884/**
5885 * Writes a byte to a virtual address in the guest memory area.
5886 * XXX: is it working for any addresses? swapped out pages?
5888 * @param env1 CPU environment.
5889 * @param pvAddr GC Virtual address.
5890 * @param val byte value
5891 */
5892void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
5893{
5894 CPUX86State *savedenv = env;
5895 env = env1;
5896 stb(addr, val);
5897 env = savedenv;
5898}
5899
5900void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
5901{
5902 CPUX86State *savedenv = env;
5903 env = env1;
5904 stw(addr, val);
5905 env = savedenv;
5906}
5907
5908void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
5909{
5910 CPUX86State *savedenv = env;
5911 env = env1;
5912 stl(addr, val);
5913 env = savedenv;
5914}
5915
5916/**
5917 * Correctly loads a selector into a segment register, updating the internal
5918 * qemu data/caches.
5919 * @param env1 CPU environment.
5920 * @param seg_reg Segment register.
5921 * @param selector Selector to load.
5922 */
5923void sync_seg(CPUX86State *env1, int seg_reg, int selector)
5924{
5925 CPUX86State *savedenv = env;
5926#ifdef FORCE_SEGMENT_SYNC
5927 jmp_buf old_buf;
5928#endif
5929
5930 env = env1;
5931
5932 if ( env->eflags & X86_EFL_VM
5933 || !(env->cr[0] & X86_CR0_PE))
5934 {
5935 load_seg_vm(seg_reg, selector);
5936
5937 env = savedenv;
5938
5939 /* Successful sync. */
5940 Assert(env1->segs[seg_reg].newselector == 0);
5941 }
5942 else
5943 {
5944        /* For some reason it works even without saving/restoring the jump buffer, and since
5945           this code is time critical, let's not do that. */
5946#ifdef FORCE_SEGMENT_SYNC
5947 memcpy(&old_buf, &env1->jmp_env, sizeof(old_buf));
5948#endif
5949 if (setjmp(env1->jmp_env) == 0)
5950 {
5951 if (seg_reg == R_CS)
5952 {
5953 uint32_t e1, e2;
5954 e1 = e2 = 0;
5955 load_segment(&e1, &e2, selector);
5956 cpu_x86_load_seg_cache(env, R_CS, selector,
5957 get_seg_base(e1, e2),
5958 get_seg_limit(e1, e2),
5959 e2);
5960 }
5961 else
5962 helper_load_seg(seg_reg, selector);
5963            /* We used to use tss_load_seg(seg_reg, selector); which, for some reason, ignored
5964               loading of 0 selectors and in turn led to subtle problems like #3588. */
5965
5966 env = savedenv;
5967
5968 /* Successful sync. */
5969 Assert(env1->segs[seg_reg].newselector == 0);
5970 }
5971 else
5972 {
5973 env = savedenv;
5974
5975 /* Postpone sync until the guest uses the selector. */
5976 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
5977 env1->segs[seg_reg].newselector = selector;
5978 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
5979 env1->exception_index = -1;
5980 env1->error_code = 0;
5981 env1->old_exception = -1;
5982 }
5983#ifdef FORCE_SEGMENT_SYNC
5984 memcpy(&env1->jmp_env, &old_buf, sizeof(old_buf));
5985#endif
5986 }
5987
5988}
5989
5990DECLINLINE(void) tb_reset_jump(TranslationBlock *tb, int n)
5991{
5992 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
5993}
5994
5995
5996int emulate_single_instr(CPUX86State *env1)
5997{
5998 TranslationBlock *tb;
5999 TranslationBlock *current;
6000 int flags;
6001 uint8_t *tc_ptr;
6002 target_ulong old_eip;
6003
6004 /* ensures env is loaded! */
6005 CPUX86State *savedenv = env;
6006 env = env1;
6007
6008 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
6009
6010 current = env->current_tb;
6011 env->current_tb = NULL;
6012 flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
6013
6014 /*
6015 * Translate only one instruction.
6016 */
6017 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
6018 tb = tb_gen_code(env, env->eip + env->segs[R_CS].base,
6019 env->segs[R_CS].base, flags, 0);
6020
6021 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
6022
6023
6024 /* tb_link_phys: */
6025 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
6026 tb->jmp_next[0] = NULL;
6027 tb->jmp_next[1] = NULL;
6028 Assert(tb->jmp_next[0] == NULL);
6029 Assert(tb->jmp_next[1] == NULL);
6030 if (tb->tb_next_offset[0] != 0xffff)
6031 tb_reset_jump(tb, 0);
6032 if (tb->tb_next_offset[1] != 0xffff)
6033 tb_reset_jump(tb, 1);
6034
6035 /*
6036 * Execute it using emulation
6037 */
6038 old_eip = env->eip;
6039 env->current_tb = tb;
6040
6041 /*
6042 * eip remains the same for repeated instructions; no idea why qemu doesn't do a jump inside the generated code
6043 * perhaps not a very safe hack
6044 */
6045 while (old_eip == env->eip)
6046 {
6047 tc_ptr = tb->tc_ptr;
6048
6049#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
6050 int fake_ret;
6051 tcg_qemu_tb_exec(tc_ptr, fake_ret);
6052#else
6053 tcg_qemu_tb_exec(tc_ptr);
6054#endif
6055
6056 /*
6057 * Exit once we detect an external interrupt and interrupts are enabled
6058 */
6059 if ( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
6060 || ( (env->eflags & IF_MASK)
6061 && !(env->hflags & HF_INHIBIT_IRQ_MASK)
6062 && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) )
6063 )
6064 {
6065 break;
6066 }
6067 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_FLUSH_TLB) {
6068 tlb_flush(env, true);
6069 }
6070 }
6071 env->current_tb = current;
6072
6073 tb_phys_invalidate(tb, -1);
6074 tb_free(tb);
6075/*
6076 Assert(tb->tb_next_offset[0] == 0xffff);
6077 Assert(tb->tb_next_offset[1] == 0xffff);
6078 Assert(tb->tb_next[0] == 0xffff);
6079 Assert(tb->tb_next[1] == 0xffff);
6080 Assert(tb->jmp_next[0] == NULL);
6081 Assert(tb->jmp_next[1] == NULL);
6082 Assert(tb->jmp_first == NULL); */
6083
6084 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
6085
6086 /*
6087 * Execute the next instruction when we encounter instruction fusing.
6088 */
6089 if (env->hflags & HF_INHIBIT_IRQ_MASK)
6090 {
6091 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
6092 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6093 emulate_single_instr(env);
6094 }
6095
6096 env = savedenv;
6097 return 0;
6098}
6099
6100/**
6101 * Correctly loads a new ldtr selector.
6102 *
6103 * @param env1 CPU environment.
6104 * @param selector Selector to load.
6105 */
6106void sync_ldtr(CPUX86State *env1, int selector)
6107{
6108 CPUX86State *saved_env = env;
6109 if (setjmp(env1->jmp_env) == 0)
6110 {
6111 env = env1;
6112 helper_lldt(selector);
6113 env = saved_env;
6114 }
6115 else
6116 {
6117 env = saved_env;
6118#ifdef VBOX_STRICT
6119 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
6120#endif
6121 }
6122}
6123
6124int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
6125 uint32_t *esp_ptr, int dpl)
6126{
6127 int type, index, shift;
6128
6129 CPUX86State *savedenv = env;
6130 env = env1;
6131
6132 if (!(env->tr.flags & DESC_P_MASK))
6133 cpu_abort(env, "invalid tss");
6134 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
6135 if ((type & 7) != 1)
6136 cpu_abort(env, "invalid tss type %d", type);
6137 shift = type >> 3;
6138 index = (dpl * 4 + 2) << shift;
6139 if (index + (4 << shift) - 1 > env->tr.limit)
6140 {
6141 env = savedenv;
6142 return 0;
6143 }
6144 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
6145
6146 if (shift == 0) {
6147 *esp_ptr = lduw_kernel(env->tr.base + index);
6148 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
6149 } else {
6150 *esp_ptr = ldl_kernel(env->tr.base + index);
6151 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
6152 }
6153
6154 env = savedenv;
6155 return 1;
6156}
6157
6158//*****************************************************************************
6159// Needs to be at the bottom of the file (overriding macros)
6160
6161static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
6162{
6163#ifdef USE_X86LDOUBLE
6164 CPU86_LDoubleU tmp;
6165 tmp.l.lower = *(uint64_t const *)ptr;
6166 tmp.l.upper = *(uint16_t const *)(ptr + 8);
6167 return tmp.d;
6168#else
6169# error "Busted FPU saving/restoring!"
6170 return *(CPU86_LDouble *)ptr;
6171#endif
6172}
6173
6174static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
6175{
6176#ifdef USE_X86LDOUBLE
6177 CPU86_LDoubleU tmp;
6178 tmp.d = f;
6179 *(uint64_t *)(ptr + 0) = tmp.l.lower;
6180 *(uint16_t *)(ptr + 8) = tmp.l.upper;
6181 *(uint16_t *)(ptr + 10) = 0;
6182 *(uint32_t *)(ptr + 12) = 0;
6183 AssertCompile(sizeof(long double) > 8);
6184#else
6185# error "Busted FPU saving/restoring!"
6186 *(CPU86_LDouble *)ptr = f;
6187#endif
6188}
6189
6190#undef stw
6191#undef stl
6192#undef stq
6193#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
6194#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
6195#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
6196
6197//*****************************************************************************
6198void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6199{
6200 int fpus, fptag, i, nb_xmm_regs;
6201 CPU86_LDouble tmp;
6202 uint8_t *addr;
6203 int data64 = !!(env->hflags & HF_LMA_MASK);
6204
6205 if (env->cpuid_features & CPUID_FXSR)
6206 {
6207 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6208 fptag = 0;
6209 for(i = 0; i < 8; i++) {
6210 fptag |= (env->fptags[i] << i);
6211 }
6212 stw(ptr, env->fpuc);
6213 stw(ptr + 2, fpus);
6214 stw(ptr + 4, fptag ^ 0xff);
6215
6216 addr = ptr + 0x20;
6217 for(i = 0;i < 8; i++) {
6218 tmp = ST(i);
6219 helper_fstt_raw(tmp, addr);
6220 addr += 16;
6221 }
6222
6223 if (env->cr[4] & CR4_OSFXSR_MASK) {
6224 /* XXX: finish it */
6225 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
6226 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
6227 nb_xmm_regs = 8 << data64;
6228 addr = ptr + 0xa0;
6229 for(i = 0; i < nb_xmm_regs; i++) {
6230#if __GNUC__ < 4
6231 stq(addr, env->xmm_regs[i].XMM_Q(0));
6232 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
6233#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
6234 stl(addr, env->xmm_regs[i].XMM_L(0));
6235 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
6236 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
6237 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
6238#endif
6239 addr += 16;
6240 }
6241 }
6242 }
6243 else
6244 {
6245 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6246 int fptag;
6247
6248 fp->FCW = env->fpuc;
6249 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
6250 fptag = 0;
6251 for (i=7; i>=0; i--) {
6252 fptag <<= 2;
6253 if (env->fptags[i]) {
6254 fptag |= 3;
6255 } else {
6256 /* the FPU automatically computes it */
6257 }
6258 }
6259 fp->FTW = fptag;
6260
6261 for(i = 0;i < 8; i++) {
6262 tmp = ST(i);
6263 helper_fstt_raw(tmp, &fp->regs[i].au8[0]);
6264 }
6265 }
6266}
6267
6268//*****************************************************************************
6269#undef lduw
6270#undef ldl
6271#undef ldq
6272#define lduw(a) *(uint16_t *)(a)
6273#define ldl(a) *(uint32_t *)(a)
6274#define ldq(a) *(uint64_t *)(a)
6275//*****************************************************************************
6276void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
6277{
6278 int i, fpus, fptag, nb_xmm_regs;
6279 CPU86_LDouble tmp;
6280 uint8_t *addr;
6281 int data64 = !!(env->hflags & HF_LMA_MASK); /* don't use HF_CS64_MASK here as cs hasn't been synced when this function is called. */
6282
6283 if (env->cpuid_features & CPUID_FXSR)
6284 {
6285 env->fpuc = lduw(ptr);
6286 fpus = lduw(ptr + 2);
6287 fptag = lduw(ptr + 4);
6288 env->fpstt = (fpus >> 11) & 7;
6289 env->fpus = fpus & ~0x3800;
6290 fptag ^= 0xff;
6291 for(i = 0;i < 8; i++) {
6292 env->fptags[i] = ((fptag >> i) & 1);
6293 }
6294
6295 addr = ptr + 0x20;
6296 for(i = 0;i < 8; i++) {
6297 tmp = helper_fldt_raw(addr);
6298 ST(i) = tmp;
6299 addr += 16;
6300 }
6301
6302 if (env->cr[4] & CR4_OSFXSR_MASK) {
6303 /* XXX: finish it, endianness */
6304 env->mxcsr = ldl(ptr + 0x18);
6305 //ldl(ptr + 0x1c);
6306 nb_xmm_regs = 8 << data64;
6307 addr = ptr + 0xa0;
6308 for(i = 0; i < nb_xmm_regs; i++) {
6309#if HC_ARCH_BITS == 32
6310 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
6311 env->xmm_regs[i].XMM_L(0) = ldl(addr);
6312 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
6313 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
6314 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
6315#else
6316 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
6317 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
6318#endif
6319 addr += 16;
6320 }
6321 }
6322 }
6323 else
6324 {
6325 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
6326 int fptag, j;
6327
6328 env->fpuc = fp->FCW;
6329 env->fpstt = (fp->FSW >> 11) & 7;
6330 env->fpus = fp->FSW & ~0x3800;
6331 fptag = fp->FTW;
6332 for(i = 0;i < 8; i++) {
6333 env->fptags[i] = ((fptag & 3) == 3);
6334 fptag >>= 2;
6335 }
6336 j = env->fpstt;
6337 for(i = 0;i < 8; i++) {
6338 tmp = helper_fldt_raw(&fp->regs[i].au8[0]);
6339 ST(i) = tmp;
6340 }
6341 }
6342}
6343//*****************************************************************************
6344//*****************************************************************************
6345
6346#endif /* VBOX */
6347
6348/* Secure Virtual Machine helpers */
6349
6350#if defined(CONFIG_USER_ONLY)
6351
6352void helper_vmrun(int aflag, int next_eip_addend)
6353{
6354}
6355void helper_vmmcall(void)
6356{
6357}
6358void helper_vmload(int aflag)
6359{
6360}
6361void helper_vmsave(int aflag)
6362{
6363}
6364void helper_stgi(void)
6365{
6366}
6367void helper_clgi(void)
6368{
6369}
6370void helper_skinit(void)
6371{
6372}
6373void helper_invlpga(int aflag)
6374{
6375}
6376void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6377{
6378}
6379void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6380{
6381}
6382
6383void helper_svm_check_io(uint32_t port, uint32_t param,
6384 uint32_t next_eip_addend)
6385{
6386}
6387#else
6388
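/* The VMCB stores segment attributes in a packed 12-bit form: bits 0-7 are
   the descriptor's type/S/DPL/P byte and bits 8-11 are AVL/L/D-B/G.  QEMU's
   SegmentCache::flags keeps those same bits at positions 8-15 and 20-23,
   hence the shifts in svm_save_seg()/svm_load_seg() below. */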
6389static inline void svm_save_seg(target_phys_addr_t addr,
6390 const SegmentCache *sc)
6391{
6392 stw_phys(addr + offsetof(struct vmcb_seg, selector),
6393 sc->selector);
6394 stq_phys(addr + offsetof(struct vmcb_seg, base),
6395 sc->base);
6396 stl_phys(addr + offsetof(struct vmcb_seg, limit),
6397 sc->limit);
6398 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
6399 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
6400}
6401
6402static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
6403{
6404 unsigned int flags;
6405
6406 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
6407 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
6408 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
6409 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
6410 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
6411}
6412
6413static inline void svm_load_seg_cache(target_phys_addr_t addr,
6414 CPUState *env, int seg_reg)
6415{
6416 SegmentCache sc1, *sc = &sc1;
6417 svm_load_seg(addr, sc);
6418 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
6419 sc->base, sc->limit, sc->flags);
6420}
6421
6422void helper_vmrun(int aflag, int next_eip_addend)
6423{
6424 target_ulong addr;
6425 uint32_t event_inj;
6426 uint32_t int_ctl;
6427
6428 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
6429
6430 if (aflag == 2)
6431 addr = EAX;
6432 else
6433 addr = (uint32_t)EAX;
6434
6435 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
6436
6437 env->vm_vmcb = addr;
6438
6439 /* save the current CPU state in the hsave page */
6440 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6441 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6442
6443 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6444 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6445
6446 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
6447 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
6448 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
6449 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
6450 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
6451 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
6452
6453 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
6454 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
6455
6456 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
6457 &env->segs[R_ES]);
6458 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
6459 &env->segs[R_CS]);
6460 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
6461 &env->segs[R_SS]);
6462 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
6463 &env->segs[R_DS]);
6464
6465 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
6466 EIP + next_eip_addend);
6467 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
6468 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
6469
6470 /* load the interception bitmaps so we do not need to access the
6471 vmcb in svm mode */
6472 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
6473 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
6474 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
6475 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
6476 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
6477 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
6478
6479 /* enable intercepts */
6480 env->hflags |= HF_SVMI_MASK;
6481
6482 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
6483
6484 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
6485 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
6486
6487 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
6488 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
6489
6490 /* clear exit_info_2 so we behave like the real hardware */
6491 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
6492
6493 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
6494 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
6495 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
6496 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
6497 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6498 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6499 if (int_ctl & V_INTR_MASKING_MASK) {
6500 env->v_tpr = int_ctl & V_TPR_MASK;
6501 env->hflags2 |= HF2_VINTR_MASK;
6502 if (env->eflags & IF_MASK)
6503 env->hflags2 |= HF2_HIF_MASK;
6504 }
6505
6506 cpu_load_efer(env,
6507 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
6508 env->eflags = 0;
6509 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
6510 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6511 CC_OP = CC_OP_EFLAGS;
6512
6513 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
6514 env, R_ES);
6515 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6516 env, R_CS);
6517 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6518 env, R_SS);
6519 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6520 env, R_DS);
6521
6522 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
6523 env->eip = EIP;
6524 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
6525 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
6526 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
6527 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
6528 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
6529
6530 /* FIXME: guest state consistency checks */
6531
6532 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
6533 case TLB_CONTROL_DO_NOTHING:
6534 break;
6535 case TLB_CONTROL_FLUSH_ALL_ASID:
6536 /* FIXME: this is not 100% correct but should work for now */
6537 tlb_flush(env, 1);
6538 break;
6539 }
6540
6541 env->hflags2 |= HF2_GIF_MASK;
6542
6543 if (int_ctl & V_IRQ_MASK) {
6544 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
6545 }
6546
6547 /* maybe we need to inject an event */
6548 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
6549 if (event_inj & SVM_EVTINJ_VALID) {
6550 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
6551 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
6552 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
6553
6554 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
6555 /* FIXME: need to implement valid_err */
6556 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
6557 case SVM_EVTINJ_TYPE_INTR:
6558 env->exception_index = vector;
6559 env->error_code = event_inj_err;
6560 env->exception_is_int = 0;
6561 env->exception_next_eip = -1;
6562 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
6563 /* XXX: is it always correct? */
6564 do_interrupt(vector, 0, 0, 0, 1);
6565 break;
6566 case SVM_EVTINJ_TYPE_NMI:
6567 env->exception_index = EXCP02_NMI;
6568 env->error_code = event_inj_err;
6569 env->exception_is_int = 0;
6570 env->exception_next_eip = EIP;
6571 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
6572 cpu_loop_exit();
6573 break;
6574 case SVM_EVTINJ_TYPE_EXEPT:
6575 env->exception_index = vector;
6576 env->error_code = event_inj_err;
6577 env->exception_is_int = 0;
6578 env->exception_next_eip = -1;
6579 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
6580 cpu_loop_exit();
6581 break;
6582 case SVM_EVTINJ_TYPE_SOFT:
6583 env->exception_index = vector;
6584 env->error_code = event_inj_err;
6585 env->exception_is_int = 1;
6586 env->exception_next_eip = EIP;
6587 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
6588 cpu_loop_exit();
6589 break;
6590 }
6591 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
6592 }
6593}
6594
6595void helper_vmmcall(void)
6596{
6597 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
6598 raise_exception(EXCP06_ILLOP);
6599}
6600
6601void helper_vmload(int aflag)
6602{
6603 target_ulong addr;
6604 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
6605
6606 if (aflag == 2)
6607 addr = EAX;
6608 else
6609 addr = (uint32_t)EAX;
6610
6611 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6612 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6613 env->segs[R_FS].base);
6614
6615 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
6616 env, R_FS);
6617 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
6618 env, R_GS);
6619 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
6620 &env->tr);
6621 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
6622 &env->ldt);
6623
6624#ifdef TARGET_X86_64
6625 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
6626 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
6627 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
6628 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
6629#endif
6630 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
6631 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
6632 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
6633 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
6634}
6635
6636void helper_vmsave(int aflag)
6637{
6638 target_ulong addr;
6639 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
6640
6641 if (aflag == 2)
6642 addr = EAX;
6643 else
6644 addr = (uint32_t)EAX;
6645
6646 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
6647 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
6648 env->segs[R_FS].base);
6649
6650 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
6651 &env->segs[R_FS]);
6652 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
6653 &env->segs[R_GS]);
6654 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
6655 &env->tr);
6656 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
6657 &env->ldt);
6658
6659#ifdef TARGET_X86_64
6660 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
6661 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
6662 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
6663 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
6664#endif
6665 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
6666 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
6667 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
6668 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
6669}
6670
6671void helper_stgi(void)
6672{
6673 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
6674 env->hflags2 |= HF2_GIF_MASK;
6675}
6676
6677void helper_clgi(void)
6678{
6679 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
6680 env->hflags2 &= ~HF2_GIF_MASK;
6681}
6682
6683void helper_skinit(void)
6684{
6685 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
6686 /* XXX: not implemented */
6687 raise_exception(EXCP06_ILLOP);
6688}
6689
6690void helper_invlpga(int aflag)
6691{
6692 target_ulong addr;
6693 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
6694
6695 if (aflag == 2)
6696 addr = EAX;
6697 else
6698 addr = (uint32_t)EAX;
6699
6700 /* XXX: could use the ASID to see whether the flush is
6701 actually needed */
6702 tlb_flush_page(env, addr);
6703}
6704
6705void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
6706{
6707 if (likely(!(env->hflags & HF_SVMI_MASK)))
6708 return;
6709#ifndef VBOX
6710 switch(type) {
6711 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
6712 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
6713 helper_vmexit(type, param);
6714 }
6715 break;
6716 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
6717 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
6718 helper_vmexit(type, param);
6719 }
6720 break;
6721 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
6722 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
6723 helper_vmexit(type, param);
6724 }
6725 break;
6726 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
6727 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
6728 helper_vmexit(type, param);
6729 }
6730 break;
6731 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
6732 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
6733 helper_vmexit(type, param);
6734 }
6735 break;
6736 case SVM_EXIT_MSR:
6737 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
6738 /* FIXME: this should be read in at vmrun (faster this way?) */
6739 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
6740 uint32_t t0, t1;
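/* The MSR permission map uses two bits per MSR (even bit = read, odd bit =
   write) in three 2-Kbyte blocks covering MSRs 0..0x1fff,
   0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff.  t1 becomes the byte
   offset into the map, t0 the bit within that byte; 'param' selects the
   read (0) or write (1) bit. */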
6741 switch((uint32_t)ECX) {
6742 case 0 ... 0x1fff:
6743 t0 = (ECX * 2) % 8;
6744 t1 = (ECX * 2) / 8;
6745 break;
6746 case 0xc0000000 ... 0xc0001fff:
6747 t0 = (8192 + ECX - 0xc0000000) * 2;
6748 t1 = (t0 / 8);
6749 t0 %= 8;
6750 break;
6751 case 0xc0010000 ... 0xc0011fff:
6752 t0 = (16384 + ECX - 0xc0010000) * 2;
6753 t1 = (t0 / 8);
6754 t0 %= 8;
6755 break;
6756 default:
6757 helper_vmexit(type, param);
6758 t0 = 0;
6759 t1 = 0;
6760 break;
6761 }
6762 if (ldub_phys(addr + t1) & ((1 << param) << t0))
6763 helper_vmexit(type, param);
6764 }
6765 break;
6766 default:
6767 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
6768 helper_vmexit(type, param);
6769 }
6770 break;
6771 }
6772#else /* VBOX */
6773 AssertMsgFailed(("We shouldn't be here, HM supported differently!"));
6774#endif /* VBOX */
6775}
6776
6777void helper_svm_check_io(uint32_t port, uint32_t param,
6778 uint32_t next_eip_addend)
6779{
6780 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
6781 /* FIXME: this should be read in at vmrun (faster this way?) */
6782 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
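/* The I/O permission map has one bit per port.  Bits 4-6 of 'param' (the
   IOIO exit info) encode the access size in bytes, so 'mask' covers every
   byte of the access, and the 16-bit load below copes with accesses that
   straddle a byte boundary of the map. */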
6783 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
6784 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
6785 /* next EIP */
6786 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
6787 env->eip + next_eip_addend);
6788 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
6789 }
6790 }
6791}
6792
6793/* Note: currently only 32 bits of exit_code are used */
6794void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
6795{
6796 uint32_t int_ctl;
6797
6798 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
6799 exit_code, exit_info_1,
6800 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
6801 EIP);
6802
6803 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
6804 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
6805 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
6806 } else {
6807 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
6808 }
6809
6810 /* Save the VM state in the vmcb */
6811 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
6812 &env->segs[R_ES]);
6813 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
6814 &env->segs[R_CS]);
6815 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
6816 &env->segs[R_SS]);
6817 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
6818 &env->segs[R_DS]);
6819
6820 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
6821 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
6822
6823 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
6824 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
6825
6826 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
6827 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
6828 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
6829 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
6830 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
6831
6832 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
6833 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
6834 int_ctl |= env->v_tpr & V_TPR_MASK;
6835 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
6836 int_ctl |= V_IRQ_MASK;
6837 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
6838
6839 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
6840 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
6841 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
6842 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
6843 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
6844 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
6845 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
6846
6847 /* Reload the host state from vm_hsave */
6848 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
6849 env->hflags &= ~HF_SVMI_MASK;
6850 env->intercept = 0;
6851 env->intercept_exceptions = 0;
6852 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
6853 env->tsc_offset = 0;
6854
6855 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
6856 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
6857
6858 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
6859 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
6860
6861 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
6862 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
6863 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
6864 /* we need to set the efer after the crs so the hidden flags get
6865 set properly */
6866 cpu_load_efer(env,
6867 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
6868 env->eflags = 0;
6869 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
6870 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
6871 CC_OP = CC_OP_EFLAGS;
6872
6873 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
6874 env, R_ES);
6875 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
6876 env, R_CS);
6877 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
6878 env, R_SS);
6879 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
6880 env, R_DS);
6881
6882 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
6883 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
6884 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
6885
6886 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
6887 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
6888
6889 /* other setups */
6890 cpu_x86_set_cpl(env, 0);
6891 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
6892 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
6893
6894 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
6895 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
6896 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
6897 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
6898 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
6899
6900 env->hflags2 &= ~HF2_GIF_MASK;
6901 /* FIXME: Resets the current ASID register to zero (host ASID). */
6902
6903 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
6904
6905 /* Clears the TSC_OFFSET inside the processor. */
6906
6907 /* If the host is in PAE mode, the processor reloads the host's PDPEs
6908 from the page table indicated by the host's CR3. If the PDPEs contain
6909 illegal state, the processor causes a shutdown. */
6910
6911 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
6912 env->cr[0] |= CR0_PE_MASK;
6913 env->eflags &= ~VM_MASK;
6914
6915 /* Disables all breakpoints in the host DR7 register. */
6916
6917 /* Checks the reloaded host state for consistency. */
6918
6919 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
6920 host's code segment or non-canonical (in the case of long mode), a
6921 #GP fault is delivered inside the host. */
6922
6923 /* remove any pending exception */
6924 env->exception_index = -1;
6925 env->error_code = 0;
6926 env->old_exception = -1;
6927
6928 cpu_loop_exit();
6929}
6930
6931#endif
6932
6933/* MMX/SSE */
6934 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
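/* env->fptags[] holds one byte per x87 register (1 = empty), so the two
   32-bit stores below update all eight tags at once: helper_enter_mmx()
   marks every register valid, helper_emms() marks them all empty. */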
6935void helper_enter_mmx(void)
6936{
6937 env->fpstt = 0;
6938 *(uint32_t *)(env->fptags) = 0;
6939 *(uint32_t *)(env->fptags + 4) = 0;
6940}
6941
6942void helper_emms(void)
6943{
6944 /* set to empty state */
6945 *(uint32_t *)(env->fptags) = 0x01010101;
6946 *(uint32_t *)(env->fptags + 4) = 0x01010101;
6947}
6948
6949/* XXX: suppress */
6950void helper_movq(void *d, void *s)
6951{
6952 *(uint64_t *)d = *(uint64_t *)s;
6953}
6954
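/* ops_sse.h is included twice: SHIFT 0 produces the 64-bit MMX variants and
   SHIFT 1 the 128-bit SSE variants.  helper_template.h is included once per
   operand size (SHIFT 0..3 = 8/16/32/64 bit) to generate the lazy condition
   code helpers dispatched further below. */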
6955#define SHIFT 0
6956#include "ops_sse.h"
6957
6958#define SHIFT 1
6959#include "ops_sse.h"
6960
6961#define SHIFT 0
6962#include "helper_template.h"
6963#undef SHIFT
6964
6965#define SHIFT 1
6966#include "helper_template.h"
6967#undef SHIFT
6968
6969#define SHIFT 2
6970#include "helper_template.h"
6971#undef SHIFT
6972
6973#ifdef TARGET_X86_64
6974
6975#define SHIFT 3
6976#include "helper_template.h"
6977#undef SHIFT
6978
6979#endif
6980
6981/* bit operations */
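/* Note: the loops below never terminate for a zero operand; callers are
   expected to pass a non-zero value to helper_bsf()/helper_bsr() (the
   translated code handles the zero case when it computes ZF), while
   helper_lzcnt() checks for it explicitly when a word size is given. */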
6982target_ulong helper_bsf(target_ulong t0)
6983{
6984 int count;
6985 target_ulong res;
6986
6987 res = t0;
6988 count = 0;
6989 while ((res & 1) == 0) {
6990 count++;
6991 res >>= 1;
6992 }
6993 return count;
6994}
6995
6996target_ulong helper_lzcnt(target_ulong t0, int wordsize)
6997{
6998 int count;
6999 target_ulong res, mask;
7000
7001 if (wordsize > 0 && t0 == 0) {
7002 return wordsize;
7003 }
7004 res = t0;
7005 count = TARGET_LONG_BITS - 1;
7006 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
7007 while ((res & mask) == 0) {
7008 count--;
7009 res <<= 1;
7010 }
7011 if (wordsize > 0) {
7012 return wordsize - 1 - count;
7013 }
7014 return count;
7015}
7016
7017target_ulong helper_bsr(target_ulong t0)
7018{
7019 return helper_lzcnt(t0, 0);
7020}
7021
7022static int compute_all_eflags(void)
7023{
7024 return CC_SRC;
7025}
7026
7027static int compute_c_eflags(void)
7028{
7029 return CC_SRC & CC_C;
7030}
7031
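/* Dispatch on the lazy condition-code operation: each case calls one of the
   compute_all_* / compute_c_* helpers generated above from helper_template.h
   for the corresponding operand size.  CC_OP_EFLAGS means the flags are
   already materialized in CC_SRC. */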
7032uint32_t helper_cc_compute_all(int op)
7033{
7034 switch (op) {
7035 default: /* should never happen */ return 0;
7036
7037 case CC_OP_EFLAGS: return compute_all_eflags();
7038
7039 case CC_OP_MULB: return compute_all_mulb();
7040 case CC_OP_MULW: return compute_all_mulw();
7041 case CC_OP_MULL: return compute_all_mull();
7042
7043 case CC_OP_ADDB: return compute_all_addb();
7044 case CC_OP_ADDW: return compute_all_addw();
7045 case CC_OP_ADDL: return compute_all_addl();
7046
7047 case CC_OP_ADCB: return compute_all_adcb();
7048 case CC_OP_ADCW: return compute_all_adcw();
7049 case CC_OP_ADCL: return compute_all_adcl();
7050
7051 case CC_OP_SUBB: return compute_all_subb();
7052 case CC_OP_SUBW: return compute_all_subw();
7053 case CC_OP_SUBL: return compute_all_subl();
7054
7055 case CC_OP_SBBB: return compute_all_sbbb();
7056 case CC_OP_SBBW: return compute_all_sbbw();
7057 case CC_OP_SBBL: return compute_all_sbbl();
7058
7059 case CC_OP_LOGICB: return compute_all_logicb();
7060 case CC_OP_LOGICW: return compute_all_logicw();
7061 case CC_OP_LOGICL: return compute_all_logicl();
7062
7063 case CC_OP_INCB: return compute_all_incb();
7064 case CC_OP_INCW: return compute_all_incw();
7065 case CC_OP_INCL: return compute_all_incl();
7066
7067 case CC_OP_DECB: return compute_all_decb();
7068 case CC_OP_DECW: return compute_all_decw();
7069 case CC_OP_DECL: return compute_all_decl();
7070
7071 case CC_OP_SHLB: return compute_all_shlb();
7072 case CC_OP_SHLW: return compute_all_shlw();
7073 case CC_OP_SHLL: return compute_all_shll();
7074
7075 case CC_OP_SARB: return compute_all_sarb();
7076 case CC_OP_SARW: return compute_all_sarw();
7077 case CC_OP_SARL: return compute_all_sarl();
7078
7079#ifdef TARGET_X86_64
7080 case CC_OP_MULQ: return compute_all_mulq();
7081
7082 case CC_OP_ADDQ: return compute_all_addq();
7083
7084 case CC_OP_ADCQ: return compute_all_adcq();
7085
7086 case CC_OP_SUBQ: return compute_all_subq();
7087
7088 case CC_OP_SBBQ: return compute_all_sbbq();
7089
7090 case CC_OP_LOGICQ: return compute_all_logicq();
7091
7092 case CC_OP_INCQ: return compute_all_incq();
7093
7094 case CC_OP_DECQ: return compute_all_decq();
7095
7096 case CC_OP_SHLQ: return compute_all_shlq();
7097
7098 case CC_OP_SARQ: return compute_all_sarq();
7099#endif
7100 }
7101}
7102
7103uint32_t helper_cc_compute_c(int op)
7104{
7105 switch (op) {
7106 default: /* should never happen */ return 0;
7107
7108 case CC_OP_EFLAGS: return compute_c_eflags();
7109
7110 case CC_OP_MULB: return compute_c_mull();
7111 case CC_OP_MULW: return compute_c_mull();
7112 case CC_OP_MULL: return compute_c_mull();
7113
7114 case CC_OP_ADDB: return compute_c_addb();
7115 case CC_OP_ADDW: return compute_c_addw();
7116 case CC_OP_ADDL: return compute_c_addl();
7117
7118 case CC_OP_ADCB: return compute_c_adcb();
7119 case CC_OP_ADCW: return compute_c_adcw();
7120 case CC_OP_ADCL: return compute_c_adcl();
7121
7122 case CC_OP_SUBB: return compute_c_subb();
7123 case CC_OP_SUBW: return compute_c_subw();
7124 case CC_OP_SUBL: return compute_c_subl();
7125
7126 case CC_OP_SBBB: return compute_c_sbbb();
7127 case CC_OP_SBBW: return compute_c_sbbw();
7128 case CC_OP_SBBL: return compute_c_sbbl();
7129
7130 case CC_OP_LOGICB: return compute_c_logicb();
7131 case CC_OP_LOGICW: return compute_c_logicw();
7132 case CC_OP_LOGICL: return compute_c_logicl();
7133
7134 case CC_OP_INCB: return compute_c_incl();
7135 case CC_OP_INCW: return compute_c_incl();
7136 case CC_OP_INCL: return compute_c_incl();
7137
7138 case CC_OP_DECB: return compute_c_incl();
7139 case CC_OP_DECW: return compute_c_incl();
7140 case CC_OP_DECL: return compute_c_incl();
7141
7142 case CC_OP_SHLB: return compute_c_shlb();
7143 case CC_OP_SHLW: return compute_c_shlw();
7144 case CC_OP_SHLL: return compute_c_shll();
7145
7146 case CC_OP_SARB: return compute_c_sarl();
7147 case CC_OP_SARW: return compute_c_sarl();
7148 case CC_OP_SARL: return compute_c_sarl();
7149
7150#ifdef TARGET_X86_64
7151 case CC_OP_MULQ: return compute_c_mull();
7152
7153 case CC_OP_ADDQ: return compute_c_addq();
7154
7155 case CC_OP_ADCQ: return compute_c_adcq();
7156
7157 case CC_OP_SUBQ: return compute_c_subq();
7158
7159 case CC_OP_SBBQ: return compute_c_sbbq();
7160
7161 case CC_OP_LOGICQ: return compute_c_logicq();
7162
7163 case CC_OP_INCQ: return compute_c_incl();
7164
7165 case CC_OP_DECQ: return compute_c_incl();
7166
7167 case CC_OP_SHLQ: return compute_c_shlq();
7168
7169 case CC_OP_SARQ: return compute_c_sarl();
7170#endif
7171 }
7172}