VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@ 18661

Last change on this file since 18661 was 18082, checked in by vboxsync, 16 years ago

recompiler adaption of r44723

  • Property svn:eol-style set to native
File size: 135.0 KB
 
/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#ifdef VBOX
# include <VBox/err.h>
# ifdef VBOX_WITH_VMI
#  include <VBox/parav.h>
# endif
#endif
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

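/* Precomputed PF lookup: parity_table[b] is CC_P when the 8-bit value b
   contains an even number of set bits and 0 otherwise, matching the x86
   parity flag.  The condition-code helpers index it with the low byte of
   a result instead of counting bits at run time. */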
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

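/* RCL rotates through CF, so a 16-bit rotate repeats every 17 steps and an
   8-bit one every 9; the tables below reduce a masked 5-bit shift count
   modulo 17/9 without a division. */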
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

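/* e1/e2 are the low/high 32-bit words of a segment descriptor: the limit
   is split across e1[15:0] and e2[19:16] (scaled to 4K granularity when
   the G bit is set), and the base across e1[31:16], e2[7:0] and
   e2[31:24].  The helpers below reassemble them. */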
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
#ifdef VBOX
    unsigned flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK;

    if (seg == R_CS)
        flags |= DESC_CS_MASK;

    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, flags);
#else
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
#endif
}

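/* A TSS stores one SS:SP pair per inner privilege level: at offset
   2 + dpl * 4 in a 16-bit TSS and 4 + dpl * 8 in a 32-bit one, which is
   exactly what the (dpl * 4 + 2) << shift computation below addresses. */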
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

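/* switch_tss() performs a hardware task switch: validate the new TSS,
   save the outgoing context into the old TSS, handle the busy bit and
   NT/back-link according to the switch source (JMP/IRET/CALL), then load
   CR3, EFLAGS, the general registers and finally the segment registers
   from the new TSS. */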
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

#if defined(VBOX) && defined(DEBUG)
    printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
#if defined(VBOX) && defined(DEBUG)
        printf("TSS 32 bits switch\n");
        printf("Saving CS=%08X\n", env->segs[R_CS].selector);
#endif
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
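/* SET_ESP only updates the bits of ESP selected by the stack-size mask:
   a 16-bit stack segment (B=0) must leave the upper 16 bits of ESP
   untouched, a 32-bit one replaces the low 32 bits, and only a 64-bit
   stack writes the full RSP. */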

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

#ifdef VBOX
# ifdef VBOX_WITH_VMI
    if (    intno == 6
        &&  PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
    {
        env->exception_index = EXCP_PARAV_CALL;
        cpu_loop_exit();
    }
# endif
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-477 of 253666.pdf */
            raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
#else
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
#endif
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
#if defined(VBOX) && defined(DEBUG)
        printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
#endif
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

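    /* shift is 1 for 386 gates (32-bit pushes) and 0 for 286 gates (16-bit
       pushes).  The frame built below, from outermost to innermost, is:
       GS/FS/DS/ES (only when leaving vm86 mode), SS:ESP (only on a stack
       switch), EFLAGS, CS, EIP, and finally the error code when the
       vector defines one. */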
#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears IF; a trap gate does not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef VBOX

/* check if VME interrupt redirection is enabled in TSS */
static inline bool is_vme_irq_redirected(int intno)
{
    int io_offset, intredir_offset;
    unsigned char val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    /* the virtual interrupt redirection bitmap is located below the io bitmap */
    intredir_offset = io_offset - 0x20;

    intredir_offset += (intno >> 3);
    if ((intredir_offset) > env->tr.limit)
        goto fail;

    val = ldub_kernel(env->tr.base + intredir_offset);
    mask = 1 << (unsigned char)(intno & 7);

    /* bit set means no redirection. */
    if ((val & mask) != 0) {
        return false;
    }
    return true;

fail:
    raise_exception_err(EXCP0D_GPF, 0);
    return true;
}

/* V86 mode software interrupt with CR4.VME=1 */
static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
{
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eflags;
    uint32_t iopl;

    iopl = ((env->eflags >> IOPL_SHIFT) & 3);

    if (!is_vme_irq_redirected(intno))
    {
        if (iopl == 3)
            /* normal protected mode handler call */
            return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
        else
            raise_exception_err(EXCP0D_GPF, 0);
    }

    /* virtual mode idt is at linear address 0 */
    ptr = 0 + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;

    old_eflags = compute_eflags();
    if (iopl < 3)
    {
        /* copy VIF into IF and set IOPL to 3 */
        if (env->eflags & VIF_MASK)
            old_eflags |= IF_MASK;
        else
            old_eflags &= ~IF_MASK;

        old_eflags |= (3 << IOPL_SHIFT);
    }

    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, old_eflags);
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, next_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(TF_MASK | RF_MASK);

    if (iopl < 3)
        env->eflags &= ~VIF_MASK;
    else
        env->eflags &= ~IF_MASK;
}
#endif /* VBOX */

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

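/* In long mode the 64-bit TSS holds RSP0-RSP2 starting at offset 4 and
   the IST pointers starting at offset 36; get_rsp_from_tss() covers both,
   since callers pass either dpl (0-2) or ist + 3 (mapping IST1 to offset
   36). */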
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

#ifdef VBOX
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears IF; a trap gate does not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

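/* SYSCALL/SYSRET pull their selectors from MSR_STAR: bits 47:32 give the
   SYSCALL CS (SS = CS + 8), bits 63:48 the SYSRET base selector.  In long
   mode the target RIP comes from LSTAR (64-bit code) or CSTAR (compat
   mode) and FMASK selects which RFLAGS bits to clear; legacy mode jumps
   to the low 32 bits of STAR. */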
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#ifdef VBOX
/**
 * Checks and processes external VMM events.
 * Called by op_check_external_event() when any of the flags is set and can be serviced.
 */
void helper_external_event(void)
{
#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
    uintptr_t uESP;
    __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
    AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
#endif
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
        remR3DmaRun(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
        remR3TimersRun(env);
    }
}

/* helper for recording call instruction addresses for later scanning */
void helper_record_call()
{
    if (    !(env->state & CPU_RAW_RING0)
        &&  (env->cr[0] & CR0_PG_MASK)
        &&  !(env->eflags & X86_EFL_IF))
        remR3RecordCall(env);
}
#endif /* VBOX */

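/* Real-mode dispatch reads a 4-byte CS:IP vector from the IVT (idt.base,
   normally linear address 0) and builds the classic 16-bit FLAGS/CS/IP
   frame; no descriptor checks apply. */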
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
#ifdef VBOX
            /* int xx *, v86 code and VME enabled? */
            if (    (env->eflags & VM_MASK)
                &&  (env->cr[4] & CR4_VME_MASK)
                &&  is_int
                &&  !is_hw
                &&  env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
               )
                do_soft_interrupt_vme(intno, error_code, next_eip);
            else
#endif /* VBOX */
                do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
#if defined(VBOX) && defined(DEBUG)
    NOT_DMIK(Log2(("raise_interrupt: %x %x %x %RGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
#endif
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

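/* On SMI entry the CPU state is written to the SMRAM save area at the top
   of the 64K SMRAM segment: sm_state = smbase + 0x8000, and all the
   0x7xxx offsets below are relative to that.  Execution then resumes at
   CS = smbase >> 4 (base smbase), EIP = 0x8000 in a real-mode-like state.
   (The VBox build aborts instead of implementing this path.) */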
void do_smm_enter(void)
{
#ifdef VBOX
    cpu_abort(env, "do_smm_enter");
#else /* !VBOX */
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
#endif /* VBOX */
}

void helper_rsm(void)
{
#ifdef VBOX
    cpu_abort(env, "helper_rsm");
#else /* !VBOX */
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif /* !VBOX */
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

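/* DIV/IDIV raise #DE both for a zero divisor and for a quotient that does
   not fit in the destination register; the helpers below reuse
   EXCP00_DIVZ for the overflow case since both map to the same x86
   exception vector. */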
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

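/* CMPXCHG8B compares EDX:EAX against the 64-bit operand at A0.  Note the
   unconditional store in the mismatch path: the hardware instruction
   always performs its write cycle, and code may depend on that for
   memory-mapped I/O. */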
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(A0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step()
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

1820void helper_cpuid(void)
1821{
1822#ifndef VBOX
1823 uint32_t index;
1824 index = (uint32_t)EAX;
1825
1826 /* test if maximum index reached */
1827 if (index & 0x80000000) {
1828 if (index > env->cpuid_xlevel)
1829 index = env->cpuid_level;
1830 } else {
1831 if (index > env->cpuid_level)
1832 index = env->cpuid_level;
1833 }
1834
1835 switch(index) {
1836 case 0:
1837 EAX = env->cpuid_level;
1838 EBX = env->cpuid_vendor1;
1839 EDX = env->cpuid_vendor2;
1840 ECX = env->cpuid_vendor3;
1841 break;
1842 case 1:
1843 EAX = env->cpuid_version;
1844 EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1845 ECX = env->cpuid_ext_features;
1846 EDX = env->cpuid_features;
1847 break;
1848 case 2:
1849 /* cache info: needed for Pentium Pro compatibility */
1850 EAX = 0x410601;
1851 EBX = 0;
1852 ECX = 0;
1853 EDX = 0;
1854 break;
1855 case 0x80000000:
1856 EAX = env->cpuid_xlevel;
1857 EBX = env->cpuid_vendor1;
1858 EDX = env->cpuid_vendor2;
1859 ECX = env->cpuid_vendor3;
1860 break;
1861 case 0x80000001:
1862 EAX = env->cpuid_features;
1863 EBX = 0;
1864 ECX = 0;
1865 EDX = env->cpuid_ext2_features;
1866 break;
1867 case 0x80000002:
1868 case 0x80000003:
1869 case 0x80000004:
1870 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1871 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1872 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1873 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1874 break;
1875 case 0x80000005:
1876 /* cache info (L1 cache) */
1877 EAX = 0x01ff01ff;
1878 EBX = 0x01ff01ff;
1879 ECX = 0x40020140;
1880 EDX = 0x40020140;
1881 break;
1882 case 0x80000006:
1883 /* cache info (L2 cache) */
1884 EAX = 0;
1885 EBX = 0x42004200;
1886 ECX = 0x02008140;
1887 EDX = 0;
1888 break;
1889 case 0x80000008:
1890 /* virtual & phys address size in low 2 bytes. */
1891 EAX = 0x00003028;
1892 EBX = 0;
1893 ECX = 0;
1894 EDX = 0;
1895 break;
1896 default:
1897 /* reserved values: zero */
1898 EAX = 0;
1899 EBX = 0;
1900 ECX = 0;
1901 EDX = 0;
1902 break;
1903 }
1904#else /* VBOX */
1905 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
1906#endif /* VBOX */
1907}
1908
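/* ENTER with a non-zero nesting level: copy the saved frame pointers of the
   enclosing frames from the old stack frame to the new one, then push the
   new frame pointer value supplied in T1. */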
1909void helper_enter_level(int level, int data32)
1910{
1911 target_ulong ssp;
1912 uint32_t esp_mask, esp, ebp;
1913
1914 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1915 ssp = env->segs[R_SS].base;
1916 ebp = EBP;
1917 esp = ESP;
1918 if (data32) {
1919 /* 32 bit */
1920 esp -= 4;
1921 while (--level) {
1922 esp -= 4;
1923 ebp -= 4;
1924 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1925 }
1926 esp -= 4;
1927 stl(ssp + (esp & esp_mask), T1);
1928 } else {
1929 /* 16 bit */
1930 esp -= 2;
1931 while (--level) {
1932 esp -= 2;
1933 ebp -= 2;
1934 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1935 }
1936 esp -= 2;
1937 stw(ssp + (esp & esp_mask), T1);
1938 }
1939}
1940
1941#ifdef TARGET_X86_64
1942void helper_enter64_level(int level, int data64)
1943{
1944 target_ulong esp, ebp;
1945 ebp = EBP;
1946 esp = ESP;
1947
1948 if (data64) {
1949 /* 64 bit */
1950 esp -= 8;
1951 while (--level) {
1952 esp -= 8;
1953 ebp -= 8;
1954 stq(esp, ldq(ebp));
1955 }
1956 esp -= 8;
1957 stq(esp, T1);
1958 } else {
1959 /* 16 bit */
1960 esp -= 2;
1961 while (--level) {
1962 esp -= 2;
1963 ebp -= 2;
1964 stw(esp, lduw(ebp));
1965 }
1966 esp -= 2;
1967 stw(esp, T1);
1968 }
1969}
1970#endif
1971
1972void helper_lldt_T0(void)
1973{
1974 int selector;
1975 SegmentCache *dt;
1976 uint32_t e1, e2;
1977 int index, entry_limit;
1978 target_ulong ptr;
1979#ifdef VBOX
1980 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%RGv, .limit=%RGv} new=%RTsel\n",
1981 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1982#endif
1983
1984 selector = T0 & 0xffff;
1985 if ((selector & 0xfffc) == 0) {
1986 /* XXX: NULL selector case: invalid LDT */
1987 env->ldt.base = 0;
1988 env->ldt.limit = 0;
1989 } else {
1990 if (selector & 0x4)
1991 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1992 dt = &env->gdt;
1993 index = selector & ~7;
1994#ifdef TARGET_X86_64
1995 if (env->hflags & HF_LMA_MASK)
1996 entry_limit = 15;
1997 else
1998#endif
1999 entry_limit = 7;
2000 if ((index + entry_limit) > dt->limit)
2001 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2002 ptr = dt->base + index;
2003 e1 = ldl_kernel(ptr);
2004 e2 = ldl_kernel(ptr + 4);
2005 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2006 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2007 if (!(e2 & DESC_P_MASK))
2008 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2009#ifdef TARGET_X86_64
2010 if (env->hflags & HF_LMA_MASK) {
2011 uint32_t e3;
2012 e3 = ldl_kernel(ptr + 8);
2013 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2014 env->ldt.base |= (target_ulong)e3 << 32;
2015 } else
2016#endif
2017 {
2018 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2019 }
2020 }
2021 env->ldt.selector = selector;
2022#ifdef VBOX
2023 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%RGv, .limit=%RGv}\n",
2024 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2025#endif
2026}
2027
2028void helper_ltr_T0(void)
2029{
2030 int selector;
2031 SegmentCache *dt;
2032 uint32_t e1, e2;
2033 int index, type, entry_limit;
2034 target_ulong ptr;
2035
2036#ifdef VBOX
2037 Log(("helper_ltr_T0: old tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2038 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2039 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2040#endif
2041
2042 selector = T0 & 0xffff;
2043 if ((selector & 0xfffc) == 0) {
2044 /* NULL selector case: invalid TR */
2045 env->tr.base = 0;
2046 env->tr.limit = 0;
2047 env->tr.flags = 0;
2048 } else {
2049 if (selector & 0x4)
2050 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2051 dt = &env->gdt;
2052 index = selector & ~7;
2053#ifdef TARGET_X86_64
2054 if (env->hflags & HF_LMA_MASK)
2055 entry_limit = 15;
2056 else
2057#endif
2058 entry_limit = 7;
2059 if ((index + entry_limit) > dt->limit)
2060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2061 ptr = dt->base + index;
2062 e1 = ldl_kernel(ptr);
2063 e2 = ldl_kernel(ptr + 4);
2064 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2065 if ((e2 & DESC_S_MASK) ||
2066 (type != 1 && type != 9))
2067 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2068 if (!(e2 & DESC_P_MASK))
2069 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2070#ifdef TARGET_X86_64
2071 if (env->hflags & HF_LMA_MASK) {
2072 uint32_t e3;
2073 e3 = ldl_kernel(ptr + 8);
2074 load_seg_cache_raw_dt(&env->tr, e1, e2);
2075 env->tr.base |= (target_ulong)e3 << 32;
2076 } else
2077#endif
2078 {
2079 load_seg_cache_raw_dt(&env->tr, e1, e2);
2080 }
2081 e2 |= DESC_TSS_BUSY_MASK;
2082 stl_kernel(ptr + 4, e2);
2083 }
2084 env->tr.selector = selector;
2085#ifdef VBOX
2086 Log(("helper_ltr_T0: new tr=%RTsel {.base=%RGv, .limit=%RGv, .flags=%RX32} new=%RTsel\n",
2087 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2088 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2089#endif
2090}
2091
2092 /* only valid in protected mode and not in VM86 mode; seg_reg must be != R_CS */
2093void load_seg(int seg_reg, int selector)
2094{
2095 uint32_t e1, e2;
2096 int cpl, dpl, rpl;
2097 SegmentCache *dt;
2098 int index;
2099 target_ulong ptr;
2100
2101 selector &= 0xffff;
2102 cpl = env->hflags & HF_CPL_MASK;
2103
2104#ifdef VBOX
2105 /* Trying to load a selector with RPL=1 while in raw ring 0? */
2106 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2107 {
2108 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2109 selector = selector & 0xfffc;
2110 }
2111#endif
2112
2113 if ((selector & 0xfffc) == 0) {
2114 /* null selector case */
2115 if (seg_reg == R_SS
2116#ifdef TARGET_X86_64
2117 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2118#endif
2119 )
2120 raise_exception_err(EXCP0D_GPF, 0);
2121 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2122 } else {
2123
2124 if (selector & 0x4)
2125 dt = &env->ldt;
2126 else
2127 dt = &env->gdt;
2128 index = selector & ~7;
2129 if ((index + 7) > dt->limit)
2130 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2131 ptr = dt->base + index;
2132 e1 = ldl_kernel(ptr);
2133 e2 = ldl_kernel(ptr + 4);
2134
2135 if (!(e2 & DESC_S_MASK))
2136 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2137 rpl = selector & 3;
2138 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2139 if (seg_reg == R_SS) {
2140 /* must be writable segment */
2141 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2142 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2143 if (rpl != cpl || dpl != cpl)
2144 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2145 } else {
2146 /* must be readable segment */
2147 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2148 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2149
2150 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2151 /* if not conforming code, test rights */
2152 if (dpl < cpl || dpl < rpl)
2153 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2154 }
2155 }
2156
2157 if (!(e2 & DESC_P_MASK)) {
2158 if (seg_reg == R_SS)
2159 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2160 else
2161 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2162 }
2163
2164 /* set the access bit if not already set */
2165 if (!(e2 & DESC_A_MASK)) {
2166 e2 |= DESC_A_MASK;
2167 stl_kernel(ptr + 4, e2);
2168 }
2169
2170 cpu_x86_load_seg_cache(env, seg_reg, selector,
2171 get_seg_base(e1, e2),
2172 get_seg_limit(e1, e2),
2173 e2);
2174#if 0
2175 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08x flags=%08x\n",
2176 selector, (unsigned long)env->segs[seg_reg].base, env->segs[seg_reg].limit, env->segs[seg_reg].flags);
2177#endif
2178 }
2179}
2180
2181/* protected mode jump */
2182void helper_ljmp_protected_T0_T1(int next_eip_addend)
2183{
2184 int new_cs, gate_cs, type;
2185 uint32_t e1, e2, cpl, dpl, rpl, limit;
2186 target_ulong new_eip, next_eip;
2187
2188 new_cs = T0;
2189 new_eip = T1;
2190 if ((new_cs & 0xfffc) == 0)
2191 raise_exception_err(EXCP0D_GPF, 0);
2192 if (load_segment(&e1, &e2, new_cs) != 0)
2193 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2194 cpl = env->hflags & HF_CPL_MASK;
2195 if (e2 & DESC_S_MASK) {
2196 if (!(e2 & DESC_CS_MASK))
2197 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2198 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2199 if (e2 & DESC_C_MASK) {
2200 /* conforming code segment */
2201 if (dpl > cpl)
2202 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2203 } else {
2204 /* non conforming code segment */
2205 rpl = new_cs & 3;
2206 if (rpl > cpl)
2207 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2208 if (dpl != cpl)
2209 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2210 }
2211 if (!(e2 & DESC_P_MASK))
2212 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2213 limit = get_seg_limit(e1, e2);
2214 if (new_eip > limit &&
2215 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2218 get_seg_base(e1, e2), limit, e2);
2219 EIP = new_eip;
2220 } else {
2221 /* jump to call or task gate */
2222 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2223 rpl = new_cs & 3;
2224 cpl = env->hflags & HF_CPL_MASK;
2225 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2226 switch(type) {
2227 case 1: /* 286 TSS */
2228 case 9: /* 386 TSS */
2229 case 5: /* task gate */
2230 if (dpl < cpl || dpl < rpl)
2231 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2232 next_eip = env->eip + next_eip_addend;
2233 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2234 CC_OP = CC_OP_EFLAGS;
2235 break;
2236 case 4: /* 286 call gate */
2237 case 12: /* 386 call gate */
2238 if ((dpl < cpl) || (dpl < rpl))
2239 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2240 if (!(e2 & DESC_P_MASK))
2241 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2242 gate_cs = e1 >> 16;
2243 new_eip = (e1 & 0xffff);
2244 if (type == 12)
2245 new_eip |= (e2 & 0xffff0000);
2246 if (load_segment(&e1, &e2, gate_cs) != 0)
2247 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2248 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2249 /* must be code segment */
2250 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2251 (DESC_S_MASK | DESC_CS_MASK)))
2252 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2253 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2254 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2255 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2256 if (!(e2 & DESC_P_MASK))
2257#ifdef VBOX /* See page 3-514 of 253666.pdf */
2258 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2259#else
2260 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2261#endif
2262 limit = get_seg_limit(e1, e2);
2263 if (new_eip > limit)
2264 raise_exception_err(EXCP0D_GPF, 0);
2265 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2266 get_seg_base(e1, e2), limit, e2);
2267 EIP = new_eip;
2268 break;
2269 default:
2270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2271 break;
2272 }
2273 }
2274}
2275
2276/* real mode call */
2277void helper_lcall_real_T0_T1(int shift, int next_eip)
2278{
2279 int new_cs, new_eip;
2280 uint32_t esp, esp_mask;
2281 target_ulong ssp;
2282
2283 new_cs = T0;
2284 new_eip = T1;
2285 esp = ESP;
2286 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2287 ssp = env->segs[R_SS].base;
2288 if (shift) {
2289 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2290 PUSHL(ssp, esp, esp_mask, next_eip);
2291 } else {
2292 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2293 PUSHW(ssp, esp, esp_mask, next_eip);
2294 }
2295
2296 SET_ESP(esp, esp_mask);
2297 env->eip = new_eip;
2298 env->segs[R_CS].selector = new_cs;
2299 env->segs[R_CS].base = (new_cs << 4);
2300}
2301
2302/* protected mode call */
2303void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2304{
2305 int new_cs, new_stack, i;
2306 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2307 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2308 uint32_t val, limit, old_sp_mask;
2309 target_ulong ssp, old_ssp, next_eip, new_eip;
2310
2311 new_cs = T0;
2312 new_eip = T1;
2313 next_eip = env->eip + next_eip_addend;
2314#ifdef DEBUG_PCALL
2315 if (loglevel & CPU_LOG_PCALL) {
2316 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2317 new_cs, (uint32_t)new_eip, shift);
2318 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2319 }
2320#endif
2321 if ((new_cs & 0xfffc) == 0)
2322 raise_exception_err(EXCP0D_GPF, 0);
2323 if (load_segment(&e1, &e2, new_cs) != 0)
2324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2325 cpl = env->hflags & HF_CPL_MASK;
2326#ifdef DEBUG_PCALL
2327 if (loglevel & CPU_LOG_PCALL) {
2328 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2329 }
2330#endif
2331 if (e2 & DESC_S_MASK) {
2332 if (!(e2 & DESC_CS_MASK))
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2335 if (e2 & DESC_C_MASK) {
2336 /* conforming code segment */
2337 if (dpl > cpl)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 } else {
2340 /* non conforming code segment */
2341 rpl = new_cs & 3;
2342 if (rpl > cpl)
2343 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2344 if (dpl != cpl)
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 }
2347 if (!(e2 & DESC_P_MASK))
2348 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2349
2350#ifdef TARGET_X86_64
2351 /* XXX: check 16/32 bit cases in long mode */
2352 if (shift == 2) {
2353 target_ulong rsp;
2354 /* 64 bit case */
2355 rsp = ESP;
2356 PUSHQ(rsp, env->segs[R_CS].selector);
2357 PUSHQ(rsp, next_eip);
2358 /* from this point, not restartable */
2359 ESP = rsp;
2360 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2361 get_seg_base(e1, e2),
2362 get_seg_limit(e1, e2), e2);
2363 EIP = new_eip;
2364 } else
2365#endif
2366 {
2367 sp = ESP;
2368 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2369 ssp = env->segs[R_SS].base;
2370 if (shift) {
2371 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2372 PUSHL(ssp, sp, sp_mask, next_eip);
2373 } else {
2374 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2375 PUSHW(ssp, sp, sp_mask, next_eip);
2376 }
2377
2378 limit = get_seg_limit(e1, e2);
2379 if (new_eip > limit)
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 /* from this point, not restartable */
2382 SET_ESP(sp, sp_mask);
2383 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2384 get_seg_base(e1, e2), limit, e2);
2385 EIP = new_eip;
2386 }
2387 } else {
2388 /* check gate type */
2389 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2390 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2391 rpl = new_cs & 3;
2392 switch(type) {
2393 case 1: /* available 286 TSS */
2394 case 9: /* available 386 TSS */
2395 case 5: /* task gate */
2396 if (dpl < cpl || dpl < rpl)
2397 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2399 CC_OP = CC_OP_EFLAGS;
2400 return;
2401 case 4: /* 286 call gate */
2402 case 12: /* 386 call gate */
2403 break;
2404 default:
2405 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2406 break;
2407 }
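/* derive the gate operand size from the descriptor type: 4 (286 call gate)
   >> 3 == 0 selects 16-bit pushes, 12 (386 call gate) >> 3 == 1 selects
   32-bit pushes */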
2408 shift = type >> 3;
2409
2410 if (dpl < cpl || dpl < rpl)
2411 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2412 /* check valid bit */
2413 if (!(e2 & DESC_P_MASK))
2414 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2415 selector = e1 >> 16;
2416 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2417 param_count = e2 & 0x1f;
2418 if ((selector & 0xfffc) == 0)
2419 raise_exception_err(EXCP0D_GPF, 0);
2420
2421 if (load_segment(&e1, &e2, selector) != 0)
2422 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2423 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2424 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2425 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2426 if (dpl > cpl)
2427 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2428 if (!(e2 & DESC_P_MASK))
2429 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2430
2431 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2432 /* to inner privilege */
2433 get_ss_esp_from_tss(&ss, &sp, dpl);
2434#ifdef DEBUG_PCALL
2435 if (loglevel & CPU_LOG_PCALL)
2436 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2437 ss, sp, param_count, ESP);
2438#endif
2439 if ((ss & 0xfffc) == 0)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if ((ss & 3) != dpl)
2442 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2443 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2446 if (ss_dpl != dpl)
2447 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2448 if (!(ss_e2 & DESC_S_MASK) ||
2449 (ss_e2 & DESC_CS_MASK) ||
2450 !(ss_e2 & DESC_W_MASK))
2451 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2452 if (!(ss_e2 & DESC_P_MASK))
2453#ifdef VBOX /* See page 3-99 of 253666.pdf */
2454 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2455#else
2456 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2457#endif
2458
2459 // push_size = ((param_count * 2) + 8) << shift;
2460
2461 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2462 old_ssp = env->segs[R_SS].base;
2463
2464 sp_mask = get_sp_mask(ss_e2);
2465 ssp = get_seg_base(ss_e1, ss_e2);
2466 if (shift) {
2467 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2468 PUSHL(ssp, sp, sp_mask, ESP);
2469 for(i = param_count - 1; i >= 0; i--) {
2470 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2471 PUSHL(ssp, sp, sp_mask, val);
2472 }
2473 } else {
2474 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2475 PUSHW(ssp, sp, sp_mask, ESP);
2476 for(i = param_count - 1; i >= 0; i--) {
2477 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2478 PUSHW(ssp, sp, sp_mask, val);
2479 }
2480 }
2481 new_stack = 1;
2482 } else {
2483 /* to same privilege */
2484 sp = ESP;
2485 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2486 ssp = env->segs[R_SS].base;
2487 // push_size = (4 << shift);
2488 new_stack = 0;
2489 }
2490
2491 if (shift) {
2492 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2493 PUSHL(ssp, sp, sp_mask, next_eip);
2494 } else {
2495 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2496 PUSHW(ssp, sp, sp_mask, next_eip);
2497 }
2498
2499 /* from this point, not restartable */
2500
2501 if (new_stack) {
2502 ss = (ss & ~3) | dpl;
2503 cpu_x86_load_seg_cache(env, R_SS, ss,
2504 ssp,
2505 get_seg_limit(ss_e1, ss_e2),
2506 ss_e2);
2507 }
2508
2509 selector = (selector & ~3) | dpl;
2510 cpu_x86_load_seg_cache(env, R_CS, selector,
2511 get_seg_base(e1, e2),
2512 get_seg_limit(e1, e2),
2513 e2);
2514 cpu_x86_set_cpl(env, dpl);
2515 SET_ESP(sp, sp_mask);
2516 EIP = offset;
2517 }
2518#ifdef USE_KQEMU
2519 if (kqemu_is_ok(env)) {
2520 env->exception_index = -1;
2521 cpu_loop_exit();
2522 }
2523#endif
2524}
2525
2526/* real and vm86 mode iret */
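/* Pops EIP, CS and EFLAGS at the current operand size. The VBOX-only path
   adds the CR4.VME rules: popping an image with IF set while VIP is pending,
   or with TF set, raises #GP, and the popped IF is redirected to VIF rather
   than IF. */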
2527void helper_iret_real(int shift)
2528{
2529 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2530 target_ulong ssp;
2531 int eflags_mask;
2532#ifdef VBOX
2533 bool fVME = false;
2534
2535 remR3TrapClear(env->pVM);
2536#endif /* VBOX */
2537
2538 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2539 sp = ESP;
2540 ssp = env->segs[R_SS].base;
2541 if (shift == 1) {
2542 /* 32 bits */
2543 POPL(ssp, sp, sp_mask, new_eip);
2544 POPL(ssp, sp, sp_mask, new_cs);
2545 new_cs &= 0xffff;
2546 POPL(ssp, sp, sp_mask, new_eflags);
2547 } else {
2548 /* 16 bits */
2549 POPW(ssp, sp, sp_mask, new_eip);
2550 POPW(ssp, sp, sp_mask, new_cs);
2551 POPW(ssp, sp, sp_mask, new_eflags);
2552 }
2553#ifdef VBOX
2554 if ( (env->eflags & VM_MASK)
2555 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2556 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2557 {
2558 fVME = true;
2559 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2560 /* if TF will be set -> #GP */
2561 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2562 || (new_eflags & TF_MASK))
2563 raise_exception(EXCP0D_GPF);
2564 }
2565#endif /* VBOX */
2566
2567 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2568 load_seg_vm(R_CS, new_cs);
2569 env->eip = new_eip;
2570#ifdef VBOX
2571 if (fVME)
2572 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2573 else
2574#endif
2575 if (env->eflags & VM_MASK)
2576 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2577 else
2578 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2579 if (shift == 0)
2580 eflags_mask &= 0xffff;
2581 load_eflags(new_eflags, eflags_mask);
2582
2583#ifdef VBOX
2584 if (fVME)
2585 {
2586 if (new_eflags & IF_MASK)
2587 env->eflags |= VIF_MASK;
2588 else
2589 env->eflags &= ~VIF_MASK;
2590 }
2591#endif /* VBOX */
2592}
2593
2594static inline void validate_seg(int seg_reg, int cpl)
2595{
2596 int dpl;
2597 uint32_t e2;
2598
2599 /* XXX: on x86_64, we do not want to nullify FS and GS because
2600 they may still contain a valid base. I would be interested to
2601 know how a real x86_64 CPU behaves */
2602 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2603 (env->segs[seg_reg].selector & 0xfffc) == 0)
2604 return;
2605
2606 e2 = env->segs[seg_reg].flags;
2607 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2608 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2609 /* data or non conforming code segment */
2610 if (dpl < cpl) {
2611 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2612 }
2613 }
2614}
2615
2616 /* protected mode return, shared by lret and iret */
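/* 'shift' selects the operand size: 0 pops 16-bit values, 1 pops 32-bit
   values, 2 pops 64-bit values (long mode). */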
2617static inline void helper_ret_protected(int shift, int is_iret, int addend)
2618{
2619 uint32_t new_cs, new_eflags, new_ss;
2620 uint32_t new_es, new_ds, new_fs, new_gs;
2621 uint32_t e1, e2, ss_e1, ss_e2;
2622 int cpl, dpl, rpl, eflags_mask, iopl;
2623 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2624
2625#ifdef TARGET_X86_64
2626 if (shift == 2)
2627 sp_mask = -1;
2628 else
2629#endif
2630 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2631 sp = ESP;
2632 ssp = env->segs[R_SS].base;
2633 new_eflags = 0; /* avoid warning */
2634#ifdef TARGET_X86_64
2635 if (shift == 2) {
2636 POPQ(sp, new_eip);
2637 POPQ(sp, new_cs);
2638 new_cs &= 0xffff;
2639 if (is_iret) {
2640 POPQ(sp, new_eflags);
2641 }
2642 } else
2643#endif
2644 if (shift == 1) {
2645 /* 32 bits */
2646 POPL(ssp, sp, sp_mask, new_eip);
2647 POPL(ssp, sp, sp_mask, new_cs);
2648 new_cs &= 0xffff;
2649 if (is_iret) {
2650 POPL(ssp, sp, sp_mask, new_eflags);
2651#if defined(VBOX) && defined(DEBUG)
2652 printf("iret: new CS %04X\n", new_cs);
2653 printf("iret: new EIP %08X\n", new_eip);
2654 printf("iret: new EFLAGS %08X\n", new_eflags);
2655 printf("iret: EAX=%08x\n", EAX);
2656#endif
2657
2658 if (new_eflags & VM_MASK)
2659 goto return_to_vm86;
2660 }
2661#ifdef VBOX
2662 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2663 {
2664#ifdef DEBUG
2665 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2666#endif
2667 new_cs = new_cs & 0xfffc;
2668 }
2669#endif
2670 } else {
2671 /* 16 bits */
2672 POPW(ssp, sp, sp_mask, new_eip);
2673 POPW(ssp, sp, sp_mask, new_cs);
2674 if (is_iret)
2675 POPW(ssp, sp, sp_mask, new_eflags);
2676 }
2677#ifdef DEBUG_PCALL
2678 if (loglevel & CPU_LOG_PCALL) {
2679 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2680 new_cs, new_eip, shift, addend);
2681 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2682 }
2683#endif
2684 if ((new_cs & 0xfffc) == 0)
2685 {
2686#if defined(VBOX) && defined(DEBUG)
2687 printf("new_cs & 0xfffc) == 0\n");
2688#endif
2689 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690 }
2691 if (load_segment(&e1, &e2, new_cs) != 0)
2692 {
2693#if defined(VBOX) && defined(DEBUG)
2694 printf("load_segment failed\n");
2695#endif
2696 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2697 }
2698 if (!(e2 & DESC_S_MASK) ||
2699 !(e2 & DESC_CS_MASK))
2700 {
2701#if defined(VBOX) && defined(DEBUG)
2702 printf("e2 mask %08x\n", e2);
2703#endif
2704 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2705 }
2706 cpl = env->hflags & HF_CPL_MASK;
2707 rpl = new_cs & 3;
2708 if (rpl < cpl)
2709 {
2710#if defined(VBOX) && defined(DEBUG)
2711 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2712#endif
2713 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2714 }
2715 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2716 if (e2 & DESC_C_MASK) {
2717 if (dpl > rpl)
2718 {
2719#if defined(VBOX) && defined(DEBUG)
2720 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2721#endif
2722 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2723 }
2724 } else {
2725 if (dpl != rpl)
2726 {
2727#if defined(VBOX) && defined(DEBUG)
2728 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2729#endif
2730 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2731 }
2732 }
2733 if (!(e2 & DESC_P_MASK))
2734 {
2735#if defined(VBOX) && defined(DEBUG)
2736 printf("DESC_P_MASK e2=%08x\n", e2);
2737#endif
2738 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2739 }
2740 sp += addend;
2741 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2742 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2743 /* return to same privilege level */
2744 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2745 get_seg_base(e1, e2),
2746 get_seg_limit(e1, e2),
2747 e2);
2748 } else {
2749 /* return to different privilege level */
2750#ifdef TARGET_X86_64
2751 if (shift == 2) {
2752 POPQ(sp, new_esp);
2753 POPQ(sp, new_ss);
2754 new_ss &= 0xffff;
2755 } else
2756#endif
2757 if (shift == 1) {
2758 /* 32 bits */
2759 POPL(ssp, sp, sp_mask, new_esp);
2760 POPL(ssp, sp, sp_mask, new_ss);
2761 new_ss &= 0xffff;
2762 } else {
2763 /* 16 bits */
2764 POPW(ssp, sp, sp_mask, new_esp);
2765 POPW(ssp, sp, sp_mask, new_ss);
2766 }
2767#ifdef DEBUG_PCALL
2768 if (loglevel & CPU_LOG_PCALL) {
2769 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2770 new_ss, new_esp);
2771 }
2772#endif
2773 if ((new_ss & 0xfffc) == 0) {
2774#ifdef TARGET_X86_64
2775 /* NULL ss is allowed in long mode if cpl != 3 */
2776 /* XXX: test CS64 ? */
2777 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2778 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2779 0, 0xffffffff,
2780 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2781 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2782 DESC_W_MASK | DESC_A_MASK);
2783 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2784 } else
2785#endif
2786 {
2787 raise_exception_err(EXCP0D_GPF, 0);
2788 }
2789 } else {
2790 if ((new_ss & 3) != rpl)
2791 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2792 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2793 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2794 if (!(ss_e2 & DESC_S_MASK) ||
2795 (ss_e2 & DESC_CS_MASK) ||
2796 !(ss_e2 & DESC_W_MASK))
2797 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2798 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2799 if (dpl != rpl)
2800 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2801 if (!(ss_e2 & DESC_P_MASK))
2802 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2803 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2804 get_seg_base(ss_e1, ss_e2),
2805 get_seg_limit(ss_e1, ss_e2),
2806 ss_e2);
2807 }
2808
2809 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2810 get_seg_base(e1, e2),
2811 get_seg_limit(e1, e2),
2812 e2);
2813 cpu_x86_set_cpl(env, rpl);
2814 sp = new_esp;
2815#ifdef TARGET_X86_64
2816 if (env->hflags & HF_CS64_MASK)
2817 sp_mask = -1;
2818 else
2819#endif
2820 sp_mask = get_sp_mask(ss_e2);
2821
2822 /* validate data segments */
2823 validate_seg(R_ES, rpl);
2824 validate_seg(R_DS, rpl);
2825 validate_seg(R_FS, rpl);
2826 validate_seg(R_GS, rpl);
2827
2828 sp += addend;
2829 }
2830 SET_ESP(sp, sp_mask);
2831 env->eip = new_eip;
2832 if (is_iret) {
2833 /* NOTE: 'cpl' is the _old_ CPL */
2834 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2835 if (cpl == 0)
2836#ifdef VBOX
2837 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2838#else
2839 eflags_mask |= IOPL_MASK;
2840#endif
2841 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2842 if (cpl <= iopl)
2843 eflags_mask |= IF_MASK;
2844 if (shift == 0)
2845 eflags_mask &= 0xffff;
2846 load_eflags(new_eflags, eflags_mask);
2847 }
2848 return;
2849
2850 return_to_vm86:
2851
2852#if 0 // defined(VBOX) && defined(DEBUG)
2853 printf("V86: new CS %04X\n", new_cs);
2854 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2855 printf("V86: new EIP %08X\n", new_eip);
2856 printf("V86: new EFLAGS %08X\n", new_eflags);
2857#endif
2858
2859 POPL(ssp, sp, sp_mask, new_esp);
2860 POPL(ssp, sp, sp_mask, new_ss);
2861 POPL(ssp, sp, sp_mask, new_es);
2862 POPL(ssp, sp, sp_mask, new_ds);
2863 POPL(ssp, sp, sp_mask, new_fs);
2864 POPL(ssp, sp, sp_mask, new_gs);
2865
2866 /* modify processor state */
2867 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2868 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2869 load_seg_vm(R_CS, new_cs & 0xffff);
2870 cpu_x86_set_cpl(env, 3);
2871 load_seg_vm(R_SS, new_ss & 0xffff);
2872 load_seg_vm(R_ES, new_es & 0xffff);
2873 load_seg_vm(R_DS, new_ds & 0xffff);
2874 load_seg_vm(R_FS, new_fs & 0xffff);
2875 load_seg_vm(R_GS, new_gs & 0xffff);
2876
2877 env->eip = new_eip & 0xffff;
2878 ESP = new_esp;
2879}
2880
2881void helper_iret_protected(int shift, int next_eip)
2882{
2883 int tss_selector, type;
2884 uint32_t e1, e2;
2885
2886#ifdef VBOX
2887 remR3TrapClear(env->pVM);
2888#endif
2889
2890 /* specific case for TSS */
2891 if (env->eflags & NT_MASK) {
2892#ifdef TARGET_X86_64
2893 if (env->hflags & HF_LMA_MASK)
2894 raise_exception_err(EXCP0D_GPF, 0);
2895#endif
2896 tss_selector = lduw_kernel(env->tr.base + 0);
2897 if (tss_selector & 4)
2898 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2899 if (load_segment(&e1, &e2, tss_selector) != 0)
2900 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2901 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2902 /* NOTE: we check both segment and busy TSS */
2903 if (type != 3)
2904 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2905 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2906 } else {
2907 helper_ret_protected(shift, 1, 0);
2908 }
2909#ifdef USE_KQEMU
2910 if (kqemu_is_ok(env)) {
2911 CC_OP = CC_OP_EFLAGS;
2912 env->exception_index = -1;
2913 cpu_loop_exit();
2914 }
2915#endif
2916}
2917
2918void helper_lret_protected(int shift, int addend)
2919{
2920 helper_ret_protected(shift, 0, addend);
2921#ifdef USE_KQEMU
2922 if (kqemu_is_ok(env)) {
2923 env->exception_index = -1;
2924 cpu_loop_exit();
2925 }
2926#endif
2927}
2928
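/* SYSENTER: switch to the flat ring-0 CS/SS pair derived from
   MSR_IA32_SYSENTER_CS (SS is CS + 8) and load ESP/EIP from the companion
   MSRs; a zero selector MSR means the fast system call MSRs were never
   initialized, hence #GP. */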
2929void helper_sysenter(void)
2930{
2931 if (env->sysenter_cs == 0) {
2932 raise_exception_err(EXCP0D_GPF, 0);
2933 }
2934 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2935 cpu_x86_set_cpl(env, 0);
2936 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2937 0, 0xffffffff,
2938 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2939 DESC_S_MASK |
2940 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2941 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2942 0, 0xffffffff,
2943 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2944 DESC_S_MASK |
2945 DESC_W_MASK | DESC_A_MASK);
2946 ESP = env->sysenter_esp;
2947 EIP = env->sysenter_eip;
2948}
2949
2950void helper_sysexit(void)
2951{
2952 int cpl;
2953
2954 cpl = env->hflags & HF_CPL_MASK;
2955 if (env->sysenter_cs == 0 || cpl != 0) {
2956 raise_exception_err(EXCP0D_GPF, 0);
2957 }
2958 cpu_x86_set_cpl(env, 3);
2959 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2960 0, 0xffffffff,
2961 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2962 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2963 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2964 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2965 0, 0xffffffff,
2966 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2967 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2968 DESC_W_MASK | DESC_A_MASK);
2969 ESP = ECX;
2970 EIP = EDX;
2971#ifdef USE_KQEMU
2972 if (kqemu_is_ok(env)) {
2973 env->exception_index = -1;
2974 cpu_loop_exit();
2975 }
2976#endif
2977}
2978
2979void helper_movl_crN_T0(int reg)
2980{
2981#if !defined(CONFIG_USER_ONLY)
2982 switch(reg) {
2983 case 0:
2984 cpu_x86_update_cr0(env, T0);
2985 break;
2986 case 3:
2987 cpu_x86_update_cr3(env, T0);
2988 break;
2989 case 4:
2990 cpu_x86_update_cr4(env, T0);
2991 break;
2992 case 8:
2993 cpu_set_apic_tpr(env, T0);
2994 break;
2995 default:
2996 env->cr[reg] = T0;
2997 break;
2998 }
2999#endif
3000}
3001
3002/* XXX: do more */
3003void helper_movl_drN_T0(int reg)
3004{
3005 env->dr[reg] = T0;
3006}
3007
3008void helper_invlpg(target_ulong addr)
3009{
3010 cpu_x86_flush_tlb(env, addr);
3011}
3012
3013void helper_rdtsc(void)
3014{
3015 uint64_t val;
3016
3017 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3018 raise_exception(EXCP0D_GPF);
3019 }
3020 val = cpu_get_tsc(env);
3021 EAX = (uint32_t)(val);
3022 EDX = (uint32_t)(val >> 32);
3023}
3024
3025#ifdef VBOX
3026void helper_rdtscp(void)
3027{
3028 uint64_t val;
3029
3030 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3031 raise_exception(EXCP0D_GPF);
3032 }
3033
3034 val = cpu_get_tsc(env);
3035 EAX = (uint32_t)(val);
3036 EDX = (uint32_t)(val >> 32);
3037 ECX = cpu_rdmsr(env, MSR_K8_TSC_AUX);
3038}
3039#endif
3040
3041#if defined(CONFIG_USER_ONLY)
3042void helper_wrmsr(void)
3043{
3044}
3045
3046void helper_rdmsr(void)
3047{
3048}
3049#else
3050void helper_wrmsr(void)
3051{
3052 uint64_t val;
3053
3054 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3055
3056 switch((uint32_t)ECX) {
3057 case MSR_IA32_SYSENTER_CS:
3058 env->sysenter_cs = val & 0xffff;
3059 break;
3060 case MSR_IA32_SYSENTER_ESP:
3061 env->sysenter_esp = val;
3062 break;
3063 case MSR_IA32_SYSENTER_EIP:
3064 env->sysenter_eip = val;
3065 break;
3066 case MSR_IA32_APICBASE:
3067 cpu_set_apic_base(env, val);
3068 break;
3069 case MSR_EFER:
3070 {
3071 uint64_t update_mask;
3072 update_mask = 0;
3073 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3074 update_mask |= MSR_EFER_SCE;
3075 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3076 update_mask |= MSR_EFER_LME;
3077 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3078 update_mask |= MSR_EFER_FFXSR;
3079 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3080 update_mask |= MSR_EFER_NXE;
3081 env->efer = (env->efer & ~update_mask) |
3082 (val & update_mask);
3083 }
3084 break;
3085 case MSR_STAR:
3086 env->star = val;
3087 break;
3088 case MSR_PAT:
3089 env->pat = val;
3090 break;
3091#ifdef TARGET_X86_64
3092 case MSR_LSTAR:
3093 env->lstar = val;
3094 break;
3095 case MSR_CSTAR:
3096 env->cstar = val;
3097 break;
3098 case MSR_FMASK:
3099 env->fmask = val;
3100 break;
3101 case MSR_FSBASE:
3102 env->segs[R_FS].base = val;
3103 break;
3104 case MSR_GSBASE:
3105 env->segs[R_GS].base = val;
3106 break;
3107 case MSR_KERNELGSBASE:
3108 env->kernelgsbase = val;
3109 break;
3110#endif
3111 default:
3112#ifndef VBOX
3113 /* XXX: exception ? */
3114 break;
3115#else /* VBOX */
3116 {
3117 uint32_t ecx = (uint32_t)ECX;
3118 /* In the x2APIC specification this range is reserved for APIC control. */
3119 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3120 cpu_apic_wrmsr(env, ecx, val);
3121 /** @todo else exception? */
3122 break;
3123 }
3124 case MSR_K8_TSC_AUX:
3125 cpu_wrmsr(env, MSR_K8_TSC_AUX, val);
3126 break;
3127#endif /* VBOX */
3128 }
3129}
3130
3131void helper_rdmsr(void)
3132{
3133 uint64_t val;
3134 switch((uint32_t)ECX) {
3135 case MSR_IA32_SYSENTER_CS:
3136 val = env->sysenter_cs;
3137 break;
3138 case MSR_IA32_SYSENTER_ESP:
3139 val = env->sysenter_esp;
3140 break;
3141 case MSR_IA32_SYSENTER_EIP:
3142 val = env->sysenter_eip;
3143 break;
3144 case MSR_IA32_APICBASE:
3145 val = cpu_get_apic_base(env);
3146 break;
3147 case MSR_EFER:
3148 val = env->efer;
3149 break;
3150 case MSR_STAR:
3151 val = env->star;
3152 break;
3153 case MSR_PAT:
3154 val = env->pat;
3155 break;
3156#ifdef TARGET_X86_64
3157 case MSR_LSTAR:
3158 val = env->lstar;
3159 break;
3160 case MSR_CSTAR:
3161 val = env->cstar;
3162 break;
3163 case MSR_FMASK:
3164 val = env->fmask;
3165 break;
3166 case MSR_FSBASE:
3167 val = env->segs[R_FS].base;
3168 break;
3169 case MSR_GSBASE:
3170 val = env->segs[R_GS].base;
3171 break;
3172 case MSR_KERNELGSBASE:
3173 val = env->kernelgsbase;
3174 break;
3175#endif
3176 default:
3177#ifndef VBOX
3178 /* XXX: exception ? */
3179 val = 0;
3180 break;
3181#else /* VBOX */
3182 {
3183 uint32_t ecx = (uint32_t)ECX;
3184 /* In the x2APIC specification this range is reserved for APIC control. */
3185 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3186 val = cpu_apic_rdmsr(env, ecx);
3187 else
3188 val = 0; /** @todo else exception? */
3189 break;
3190 }
3191 case MSR_IA32_TSC:
3192 case MSR_K8_TSC_AUX:
3193 val = cpu_rdmsr(env, (uint32_t)ECX);
3194 break;
3195#endif /* VBOX */
3196 }
3197 EAX = (uint32_t)(val);
3198 EDX = (uint32_t)(val >> 32);
3199}
3200#endif
3201
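/* LSL: load the limit of the selected descriptor into T1 and set ZF when
   the descriptor type and privilege checks allow it; on failure ZF is
   simply cleared and no exception is raised. */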
3202void helper_lsl(void)
3203{
3204 unsigned int selector, limit;
3205 uint32_t e1, e2, eflags;
3206 int rpl, dpl, cpl, type;
3207
3208 eflags = cc_table[CC_OP].compute_all();
3209 selector = T0 & 0xffff;
3210 if (load_segment(&e1, &e2, selector) != 0)
3211 goto fail;
3212 rpl = selector & 3;
3213 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3214 cpl = env->hflags & HF_CPL_MASK;
3215 if (e2 & DESC_S_MASK) {
3216 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3217 /* conforming */
3218 } else {
3219 if (dpl < cpl || dpl < rpl)
3220 goto fail;
3221 }
3222 } else {
3223 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3224 switch(type) {
3225 case 1:
3226 case 2:
3227 case 3:
3228 case 9:
3229 case 11:
3230 break;
3231 default:
3232 goto fail;
3233 }
3234 if (dpl < cpl || dpl < rpl) {
3235 fail:
3236 CC_SRC = eflags & ~CC_Z;
3237 return;
3238 }
3239 }
3240 limit = get_seg_limit(e1, e2);
3241 T1 = limit;
3242 CC_SRC = eflags | CC_Z;
3243}
3244
3245void helper_lar(void)
3246{
3247 unsigned int selector;
3248 uint32_t e1, e2, eflags;
3249 int rpl, dpl, cpl, type;
3250
3251 eflags = cc_table[CC_OP].compute_all();
3252 selector = T0 & 0xffff;
3253 if ((selector & 0xfffc) == 0)
3254 goto fail;
3255 if (load_segment(&e1, &e2, selector) != 0)
3256 goto fail;
3257 rpl = selector & 3;
3258 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3259 cpl = env->hflags & HF_CPL_MASK;
3260 if (e2 & DESC_S_MASK) {
3261 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3262 /* conforming */
3263 } else {
3264 if (dpl < cpl || dpl < rpl)
3265 goto fail;
3266 }
3267 } else {
3268 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3269 switch(type) {
3270 case 1:
3271 case 2:
3272 case 3:
3273 case 4:
3274 case 5:
3275 case 9:
3276 case 11:
3277 case 12:
3278 break;
3279 default:
3280 goto fail;
3281 }
3282 if (dpl < cpl || dpl < rpl) {
3283 fail:
3284 CC_SRC = eflags & ~CC_Z;
3285 return;
3286 }
3287 }
3288 T1 = e2 & 0x00f0ff00;
3289 CC_SRC = eflags | CC_Z;
3290}
3291
3292void helper_verr(void)
3293{
3294 unsigned int selector;
3295 uint32_t e1, e2, eflags;
3296 int rpl, dpl, cpl;
3297
3298 eflags = cc_table[CC_OP].compute_all();
3299 selector = T0 & 0xffff;
3300 if ((selector & 0xfffc) == 0)
3301 goto fail;
3302 if (load_segment(&e1, &e2, selector) != 0)
3303 goto fail;
3304 if (!(e2 & DESC_S_MASK))
3305 goto fail;
3306 rpl = selector & 3;
3307 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3308 cpl = env->hflags & HF_CPL_MASK;
3309 if (e2 & DESC_CS_MASK) {
3310 if (!(e2 & DESC_R_MASK))
3311 goto fail;
3312 if (!(e2 & DESC_C_MASK)) {
3313 if (dpl < cpl || dpl < rpl)
3314 goto fail;
3315 }
3316 } else {
3317 if (dpl < cpl || dpl < rpl) {
3318 fail:
3319 CC_SRC = eflags & ~CC_Z;
3320 return;
3321 }
3322 }
3323 CC_SRC = eflags | CC_Z;
3324}
3325
3326void helper_verw(void)
3327{
3328 unsigned int selector;
3329 uint32_t e1, e2, eflags;
3330 int rpl, dpl, cpl;
3331
3332 eflags = cc_table[CC_OP].compute_all();
3333 selector = T0 & 0xffff;
3334 if ((selector & 0xfffc) == 0)
3335 goto fail;
3336 if (load_segment(&e1, &e2, selector) != 0)
3337 goto fail;
3338 if (!(e2 & DESC_S_MASK))
3339 goto fail;
3340 rpl = selector & 3;
3341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3342 cpl = env->hflags & HF_CPL_MASK;
3343 if (e2 & DESC_CS_MASK) {
3344 goto fail;
3345 } else {
3346 if (dpl < cpl || dpl < rpl)
3347 goto fail;
3348 if (!(e2 & DESC_W_MASK)) {
3349 fail:
3350 CC_SRC = eflags & ~CC_Z;
3351 return;
3352 }
3353 }
3354 CC_SRC = eflags | CC_Z;
3355}
3356
3357/* FPU helpers */
3358
3359void helper_fldt_ST0_A0(void)
3360{
3361 int new_fpstt;
3362 new_fpstt = (env->fpstt - 1) & 7;
3363 env->fpregs[new_fpstt].d = helper_fldt(A0);
3364 env->fpstt = new_fpstt;
3365 env->fptags[new_fpstt] = 0; /* validate stack entry */
3366}
3367
3368void helper_fstt_ST0_A0(void)
3369{
3370 helper_fstt(ST0, A0);
3371}
3372
3373void fpu_set_exception(int mask)
3374{
3375 env->fpus |= mask;
3376 if (env->fpus & (~env->fpuc & FPUC_EM))
3377 env->fpus |= FPUS_SE | FPUS_B;
3378}
3379
3380CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3381{
3382 if (b == 0.0)
3383 fpu_set_exception(FPUS_ZE);
3384 return a / b;
3385}
3386
3387void fpu_raise_exception(void)
3388{
3389 if (env->cr[0] & CR0_NE_MASK) {
3390 raise_exception(EXCP10_COPR);
3391 }
3392#if !defined(CONFIG_USER_ONLY)
3393 else {
3394 cpu_set_ferr(env);
3395 }
3396#endif
3397}
3398
3399/* BCD ops */
3400
3401void helper_fbld_ST0_A0(void)
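/* FBLD/FBST use the 80-bit packed-BCD format: 18 decimal digits in bytes
   0..8 (two per byte, low digit in the low nibble) and the sign in bit 7
   of byte 9. */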
3402{
3403 CPU86_LDouble tmp;
3404 uint64_t val;
3405 unsigned int v;
3406 int i;
3407
3408 val = 0;
3409 for(i = 8; i >= 0; i--) {
3410 v = ldub(A0 + i);
3411 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3412 }
3413 tmp = val;
3414 if (ldub(A0 + 9) & 0x80)
3415 tmp = -tmp;
3416 fpush();
3417 ST0 = tmp;
3418}
3419
3420void helper_fbst_ST0_A0(void)
3421{
3422 int v;
3423 target_ulong mem_ref, mem_end;
3424 int64_t val;
3425
3426 val = floatx_to_int64(ST0, &env->fp_status);
3427 mem_ref = A0;
3428 mem_end = mem_ref + 9;
3429 if (val < 0) {
3430 stb(mem_end, 0x80);
3431 val = -val;
3432 } else {
3433 stb(mem_end, 0x00);
3434 }
3435 while (mem_ref < mem_end) {
3436 if (val == 0)
3437 break;
3438 v = val % 100;
3439 val = val / 100;
3440 v = ((v / 10) << 4) | (v % 10);
3441 stb(mem_ref++, v);
3442 }
3443 while (mem_ref < mem_end) {
3444 stb(mem_ref++, 0);
3445 }
3446}
3447
3448void helper_f2xm1(void)
3449{
3450 ST0 = pow(2.0,ST0) - 1.0;
3451}
3452
3453void helper_fyl2x(void)
3454{
3455 CPU86_LDouble fptemp;
3456
3457 fptemp = ST0;
3458 if (fptemp>0.0){
3459 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3460 ST1 *= fptemp;
3461 fpop();
3462 } else {
3463 env->fpus &= (~0x4700);
3464 env->fpus |= 0x400;
3465 }
3466}
3467
3468void helper_fptan(void)
3469{
3470 CPU86_LDouble fptemp;
3471
3472 fptemp = ST0;
3473 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3474 env->fpus |= 0x400;
3475 } else {
3476 ST0 = tan(fptemp);
3477 fpush();
3478 ST0 = 1.0;
3479 env->fpus &= (~0x400); /* C2 <-- 0 */
3480 /* the above code is for |arg| < 2**52 only */
3481 }
3482}
3483
3484void helper_fpatan(void)
3485{
3486 CPU86_LDouble fptemp, fpsrcop;
3487
3488 fpsrcop = ST1;
3489 fptemp = ST0;
3490 ST1 = atan2(fpsrcop,fptemp);
3491 fpop();
3492}
3493
3494void helper_fxtract(void)
3495{
3496 CPU86_LDoubleU temp;
3497 unsigned int expdif;
3498
3499 temp.d = ST0;
3500 expdif = EXPD(temp) - EXPBIAS;
3501 /* DP exponent bias */
3502 ST0 = expdif;
3503 fpush();
3504 BIASEXPONENT(temp);
3505 ST0 = temp.d;
3506}
3507
3508void helper_fprem1(void)
3509{
3510 CPU86_LDouble dblq, fpsrcop, fptemp;
3511 CPU86_LDoubleU fpsrcop1, fptemp1;
3512 int expdif;
3513 int q;
3514
3515 fpsrcop = ST0;
3516 fptemp = ST1;
3517 fpsrcop1.d = fpsrcop;
3518 fptemp1.d = fptemp;
3519 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3520 if (expdif < 53) {
3521 dblq = fpsrcop / fptemp;
3522 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3523 ST0 = fpsrcop - fptemp*dblq;
3524 q = (int)dblq; /* cutting off top bits is assumed here */
3525 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3526 /* (C0,C1,C3) <-- (q2,q1,q0) */
3527 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3528 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3529 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3530 } else {
3531 env->fpus |= 0x400; /* C2 <-- 1 */
3532 fptemp = pow(2.0, expdif-50);
3533 fpsrcop = (ST0 / ST1) / fptemp;
3534 /* fpsrcop = integer obtained by rounding to the nearest */
3535 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3536 floor(fpsrcop): ceil(fpsrcop);
3537 ST0 -= (ST1 * fpsrcop * fptemp);
3538 }
3539}
3540
3541void helper_fprem(void)
3542{
3543 CPU86_LDouble dblq, fpsrcop, fptemp;
3544 CPU86_LDoubleU fpsrcop1, fptemp1;
3545 int expdif;
3546 int q;
3547
3548 fpsrcop = ST0;
3549 fptemp = ST1;
3550 fpsrcop1.d = fpsrcop;
3551 fptemp1.d = fptemp;
3552 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3553 if ( expdif < 53 ) {
3554 dblq = fpsrcop / fptemp;
3555 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3556 ST0 = fpsrcop - fptemp*dblq;
3557 q = (int)dblq; /* cutting off top bits is assumed here */
3558 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3559 /* (C0,C1,C3) <-- (q2,q1,q0) */
3560 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3561 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3562 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3563 } else {
3564 env->fpus |= 0x400; /* C2 <-- 1 */
3565 fptemp = pow(2.0, expdif-50);
3566 fpsrcop = (ST0 / ST1) / fptemp;
3567 /* fpsrcop = integer obtained by chopping */
3568 fpsrcop = (fpsrcop < 0.0)?
3569 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3570 ST0 -= (ST1 * fpsrcop * fptemp);
3571 }
3572}
3573
3574void helper_fyl2xp1(void)
3575{
3576 CPU86_LDouble fptemp;
3577
3578 fptemp = ST0;
3579 if ((fptemp+1.0)>0.0) {
3580 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3581 ST1 *= fptemp;
3582 fpop();
3583 } else {
3584 env->fpus &= (~0x4700);
3585 env->fpus |= 0x400;
3586 }
3587}
3588
3589void helper_fsqrt(void)
3590{
3591 CPU86_LDouble fptemp;
3592
3593 fptemp = ST0;
3594 if (fptemp<0.0) {
3595 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3596 env->fpus |= 0x400;
3597 }
3598 ST0 = sqrt(fptemp);
3599}
3600
3601void helper_fsincos(void)
3602{
3603 CPU86_LDouble fptemp;
3604
3605 fptemp = ST0;
3606 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3607 env->fpus |= 0x400;
3608 } else {
3609 ST0 = sin(fptemp);
3610 fpush();
3611 ST0 = cos(fptemp);
3612 env->fpus &= (~0x400); /* C2 <-- 0 */
3613 /* the above code is for |arg| < 2**63 only */
3614 }
3615}
3616
3617void helper_frndint(void)
3618{
3619 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3620}
3621
3622void helper_fscale(void)
3623{
3624 ST0 = ldexp (ST0, (int)(ST1));
3625}
3626
3627void helper_fsin(void)
3628{
3629 CPU86_LDouble fptemp;
3630
3631 fptemp = ST0;
3632 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3633 env->fpus |= 0x400;
3634 } else {
3635 ST0 = sin(fptemp);
3636 env->fpus &= (~0x400); /* C2 <-- 0 */
3637 /* the above code is for |arg| < 2**53 only */
3638 }
3639}
3640
3641void helper_fcos(void)
3642{
3643 CPU86_LDouble fptemp;
3644
3645 fptemp = ST0;
3646 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3647 env->fpus |= 0x400;
3648 } else {
3649 ST0 = cos(fptemp);
3650 env->fpus &= (~0x400); /* C2 <-- 0 */
3651 /* the above code is for |arg| < 2**63 only */
3652 }
3653}
3654
3655void helper_fxam_ST0(void)
3656{
3657 CPU86_LDoubleU temp;
3658 int expdif;
3659
3660 temp.d = ST0;
3661
3662 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3663 if (SIGND(temp))
3664 env->fpus |= 0x200; /* C1 <-- 1 */
3665
3666 /* XXX: test fptags too */
3667 expdif = EXPD(temp);
3668 if (expdif == MAXEXPD) {
3669#ifdef USE_X86LDOUBLE
3670 if (MANTD(temp) == 0x8000000000000000ULL)
3671#else
3672 if (MANTD(temp) == 0)
3673#endif
3674 env->fpus |= 0x500 /*Infinity*/;
3675 else
3676 env->fpus |= 0x100 /*NaN*/;
3677 } else if (expdif == 0) {
3678 if (MANTD(temp) == 0)
3679 env->fpus |= 0x4000 /*Zero*/;
3680 else
3681 env->fpus |= 0x4400 /*Denormal*/;
3682 } else {
3683 env->fpus |= 0x400;
3684 }
3685}
3686
3687void helper_fstenv(target_ulong ptr, int data32)
3688{
3689 int fpus, fptag, exp, i;
3690 uint64_t mant;
3691 CPU86_LDoubleU tmp;
3692
3693 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3694 fptag = 0;
3695 for (i=7; i>=0; i--) {
3696 fptag <<= 2;
3697 if (env->fptags[i]) {
3698 fptag |= 3;
3699 } else {
3700 tmp.d = env->fpregs[i].d;
3701 exp = EXPD(tmp);
3702 mant = MANTD(tmp);
3703 if (exp == 0 && mant == 0) {
3704 /* zero */
3705 fptag |= 1;
3706 } else if (exp == 0 || exp == MAXEXPD
3707#ifdef USE_X86LDOUBLE
3708 || (mant & (1LL << 63)) == 0
3709#endif
3710 ) {
3711 /* NaNs, infinity, denormal */
3712 fptag |= 2;
3713 }
3714 }
3715 }
3716 if (data32) {
3717 /* 32 bit */
3718 stl(ptr, env->fpuc);
3719 stl(ptr + 4, fpus);
3720 stl(ptr + 8, fptag);
3721 stl(ptr + 12, 0); /* fpip */
3722 stl(ptr + 16, 0); /* fpcs */
3723 stl(ptr + 20, 0); /* fpoo */
3724 stl(ptr + 24, 0); /* fpos */
3725 } else {
3726 /* 16 bit */
3727 stw(ptr, env->fpuc);
3728 stw(ptr + 2, fpus);
3729 stw(ptr + 4, fptag);
3730 stw(ptr + 6, 0);
3731 stw(ptr + 8, 0);
3732 stw(ptr + 10, 0);
3733 stw(ptr + 12, 0);
3734 }
3735}
3736
3737void helper_fldenv(target_ulong ptr, int data32)
3738{
3739 int i, fpus, fptag;
3740
3741 if (data32) {
3742 env->fpuc = lduw(ptr);
3743 fpus = lduw(ptr + 4);
3744 fptag = lduw(ptr + 8);
3745 }
3746 else {
3747 env->fpuc = lduw(ptr);
3748 fpus = lduw(ptr + 2);
3749 fptag = lduw(ptr + 4);
3750 }
3751 env->fpstt = (fpus >> 11) & 7;
3752 env->fpus = fpus & ~0x3800;
3753 for(i = 0;i < 8; i++) {
3754 env->fptags[i] = ((fptag & 3) == 3);
3755 fptag >>= 2;
3756 }
3757}
3758
3759void helper_fsave(target_ulong ptr, int data32)
3760{
3761 CPU86_LDouble tmp;
3762 int i;
3763
3764 helper_fstenv(ptr, data32);
3765
3766 ptr += (14 << data32);
3767 for(i = 0;i < 8; i++) {
3768 tmp = ST(i);
3769 helper_fstt(tmp, ptr);
3770 ptr += 10;
3771 }
3772
3773 /* fninit */
3774 env->fpus = 0;
3775 env->fpstt = 0;
3776 env->fpuc = 0x37f;
3777 env->fptags[0] = 1;
3778 env->fptags[1] = 1;
3779 env->fptags[2] = 1;
3780 env->fptags[3] = 1;
3781 env->fptags[4] = 1;
3782 env->fptags[5] = 1;
3783 env->fptags[6] = 1;
3784 env->fptags[7] = 1;
3785}
3786
3787void helper_frstor(target_ulong ptr, int data32)
3788{
3789 CPU86_LDouble tmp;
3790 int i;
3791
3792 helper_fldenv(ptr, data32);
3793 ptr += (14 << data32);
3794
3795 for(i = 0;i < 8; i++) {
3796 tmp = helper_fldt(ptr);
3797 ST(i) = tmp;
3798 ptr += 10;
3799 }
3800}
3801
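/* FXSAVE image layout as used here: FPU control/status/tag words at the
   start of the 512-byte area, the eight 80-bit FP registers at offset 0x20
   spaced 16 bytes apart, and, when CR4.OSFXSR is set, MXCSR at 0x18 with
   the XMM registers at offset 0xa0. */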
3802void helper_fxsave(target_ulong ptr, int data64)
3803{
3804 int fpus, fptag, i, nb_xmm_regs;
3805 CPU86_LDouble tmp;
3806 target_ulong addr;
3807
3808 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3809 fptag = 0;
3810 for(i = 0; i < 8; i++) {
3811 fptag |= (env->fptags[i] << i);
3812 }
3813 stw(ptr, env->fpuc);
3814 stw(ptr + 2, fpus);
3815 stw(ptr + 4, fptag ^ 0xff);
3816
3817 addr = ptr + 0x20;
3818 for(i = 0;i < 8; i++) {
3819 tmp = ST(i);
3820 helper_fstt(tmp, addr);
3821 addr += 16;
3822 }
3823
3824 if (env->cr[4] & CR4_OSFXSR_MASK) {
3825 /* XXX: finish it */
3826 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3827 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3828 nb_xmm_regs = 8 << data64;
3829 addr = ptr + 0xa0;
3830 for(i = 0; i < nb_xmm_regs; i++) {
3831 stq(addr, env->xmm_regs[i].XMM_Q(0));
3832 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3833 addr += 16;
3834 }
3835 }
3836}
3837
3838void helper_fxrstor(target_ulong ptr, int data64)
3839{
3840 int i, fpus, fptag, nb_xmm_regs;
3841 CPU86_LDouble tmp;
3842 target_ulong addr;
3843
3844 env->fpuc = lduw(ptr);
3845 fpus = lduw(ptr + 2);
3846 fptag = lduw(ptr + 4);
3847 env->fpstt = (fpus >> 11) & 7;
3848 env->fpus = fpus & ~0x3800;
3849 fptag ^= 0xff;
3850 for(i = 0;i < 8; i++) {
3851 env->fptags[i] = ((fptag >> i) & 1);
3852 }
3853
3854 addr = ptr + 0x20;
3855 for(i = 0;i < 8; i++) {
3856 tmp = helper_fldt(addr);
3857 ST(i) = tmp;
3858 addr += 16;
3859 }
3860
3861 if (env->cr[4] & CR4_OSFXSR_MASK) {
3862 /* XXX: finish it */
3863 env->mxcsr = ldl(ptr + 0x18);
3864 //ldl(ptr + 0x1c);
3865 nb_xmm_regs = 8 << data64;
3866 addr = ptr + 0xa0;
3867 for(i = 0; i < nb_xmm_regs; i++) {
3868#if !defined(VBOX) || __GNUC__ < 4
3869 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3870 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3871#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3872# if 1
3873 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3874 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3875 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3876 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3877# else
3878 /* this works fine on Mac OS X, gcc 4.0.1 */
3879 uint64_t u64 = ldq(addr);
3880 env->xmm_regs[i].XMM_Q(0) = u64;
3881 u64 = ldq(addr + 8);
3882 env->xmm_regs[i].XMM_Q(1) = u64;
3883# endif
3884#endif
3885 addr += 16;
3886 }
3887 }
3888}
3889
3890#ifndef USE_X86LDOUBLE
3891
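/* Without a native 80-bit long double the FPU registers are held as IEEE
   doubles, so converting to the 80-bit memory image rebiases the exponent
   (1023 -> 16383) and makes the integer bit explicit; the reverse
   direction truncates the mantissa back to 52 bits. */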
3892void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3893{
3894 CPU86_LDoubleU temp;
3895 int e;
3896
3897 temp.d = f;
3898 /* mantissa */
3899 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3900 /* exponent + sign */
3901 e = EXPD(temp) - EXPBIAS + 16383;
3902 e |= SIGND(temp) >> 16;
3903 *pexp = e;
3904}
3905
3906CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3907{
3908 CPU86_LDoubleU temp;
3909 int e;
3910 uint64_t ll;
3911
3912 /* XXX: handle overflow ? */
3913 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3914 e |= (upper >> 4) & 0x800; /* sign */
3915 ll = (mant >> 11) & ((1LL << 52) - 1);
3916#ifdef __arm__
3917 temp.l.upper = (e << 20) | (ll >> 32);
3918 temp.l.lower = ll;
3919#else
3920 temp.ll = ll | ((uint64_t)e << 52);
3921#endif
3922 return temp.d;
3923}
3924
3925#else
3926
3927void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3928{
3929 CPU86_LDoubleU temp;
3930
3931 temp.d = f;
3932 *pmant = temp.l.lower;
3933 *pexp = temp.l.upper;
3934}
3935
3936CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3937{
3938 CPU86_LDoubleU temp;
3939
3940 temp.l.upper = upper;
3941 temp.l.lower = mant;
3942 return temp.d;
3943}
3944#endif
3945
3946#ifdef TARGET_X86_64
3947
3948//#define DEBUG_MULDIV
3949
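/* 128-bit helpers for the 64-bit MUL/IMUL/DIV/IDIV paths below: values are
   carried as (low, high) uint64_t pairs, with add128() propagating the
   carry and neg128() forming the two's complement. */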
3950static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3951{
3952 *plow += a;
3953 /* carry test */
3954 if (*plow < a)
3955 (*phigh)++;
3956 *phigh += b;
3957}
3958
3959static void neg128(uint64_t *plow, uint64_t *phigh)
3960{
3961 *plow = ~ *plow;
3962 *phigh = ~ *phigh;
3963 add128(plow, phigh, 1, 0);
3964}
3965
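/* 64x64->128 multiply built from four 32x32->64 partial products; the two
   cross terms are added into the result at a 32-bit offset via add128() so
   carries propagate into the high word. */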
3966static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3967{
3968 uint32_t a0, a1, b0, b1;
3969 uint64_t v;
3970
3971 a0 = a;
3972 a1 = a >> 32;
3973
3974 b0 = b;
3975 b1 = b >> 32;
3976
3977 v = (uint64_t)a0 * (uint64_t)b0;
3978 *plow = v;
3979 *phigh = 0;
3980
3981 v = (uint64_t)a0 * (uint64_t)b1;
3982 add128(plow, phigh, v << 32, v >> 32);
3983
3984 v = (uint64_t)a1 * (uint64_t)b0;
3985 add128(plow, phigh, v << 32, v >> 32);
3986
3987 v = (uint64_t)a1 * (uint64_t)b1;
3988 *phigh += v;
3989#ifdef DEBUG_MULDIV
3990 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3991 a, b, *phigh, *plow);
3992#endif
3993}
3994
3995static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3996{
3997 int sa, sb;
3998 sa = (a < 0);
3999 if (sa)
4000 a = -a;
4001 sb = (b < 0);
4002 if (sb)
4003 b = -b;
4004 mul64(plow, phigh, a, b);
4005 if (sa ^ sb) {
4006 neg128(plow, phigh);
4007 }
4008}
4009
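/* 128-by-64 unsigned divide used by DIV r/m64: when the high half is zero
   the host's 64-bit divide suffices; otherwise a bit-at-a-time restoring
   division shifts the dividend/remainder pair left and conditionally
   subtracts the divisor, accumulating quotient bits in a0. */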
4010/* return TRUE if overflow */
4011static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4012{
4013 uint64_t q, r, a1, a0;
4014 int i, qb, ab;
4015
4016 a0 = *plow;
4017 a1 = *phigh;
4018 if (a1 == 0) {
4019 q = a0 / b;
4020 r = a0 % b;
4021 *plow = q;
4022 *phigh = r;
4023 } else {
4024 if (a1 >= b)
4025 return 1;
4026 /* XXX: use a better algorithm */
4027 for(i = 0; i < 64; i++) {
4028 ab = a1 >> 63;
4029 a1 = (a1 << 1) | (a0 >> 63);
4030 if (ab || a1 >= b) {
4031 a1 -= b;
4032 qb = 1;
4033 } else {
4034 qb = 0;
4035 }
4036 a0 = (a0 << 1) | qb;
4037 }
4038#if defined(DEBUG_MULDIV)
4039 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4040 *phigh, *plow, b, a0, a1);
4041#endif
4042 *plow = a0;
4043 *phigh = a1;
4044 }
4045 return 0;
4046}
4047
4048/* return TRUE if overflow */
4049static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4050{
4051 int sa, sb;
4052 sa = ((int64_t)*phigh < 0);
4053 if (sa)
4054 neg128(plow, phigh);
4055 sb = (b < 0);
4056 if (sb)
4057 b = -b;
4058 if (div64(plow, phigh, b) != 0)
4059 return 1;
4060 if (sa ^ sb) {
4061 if (*plow > (1ULL << 63))
4062 return 1;
4063 *plow = - *plow;
4064 } else {
4065 if (*plow >= (1ULL << 63))
4066 return 1;
4067 }
4068 if (sa)
4069 *phigh = - *phigh;
4070 return 0;
4071}
4072
4073void helper_mulq_EAX_T0(void)
4074{
4075 uint64_t r0, r1;
4076
4077 mul64(&r0, &r1, EAX, T0);
4078 EAX = r0;
4079 EDX = r1;
4080 CC_DST = r0;
4081 CC_SRC = r1;
4082}
4083
void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    imul64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

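/* MWAIT: ECX holds extension hints and must be zero here, else #GP.
   The VBOX build simply treats MWAIT as a plain HLT. */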
void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
#ifdef VBOX
    helper_hlt();
#else
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
#endif
}

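/* Approximations for the RSQRTPS/RCPPS family. Note these use
   full-precision host arithmetic rather than the roughly 12-bit
   mantissa approximation real hardware provides. */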
float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

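/* Propagate the guest FPU control word into softfloat: rounding control
   lives in FCW bits 11:10, precision control in bits 9:8 (0 = single,
   2 = double, 3 = extended). */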
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

#ifdef VBOX

/**
 * Correctly computes the eflags.
 * @returns eflags.
 * @param env1 CPU environment.
 */
uint32_t raw_compute_eflags(CPUX86State *env1)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t efl = compute_eflags();
    env = savedenv;
    return efl;
}

/**
 * Reads a byte from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @returns The data byte read.
 * @param env1 CPU environment.
 * @param pvAddr GC Virtual address.
 */
uint8_t read_byte(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint8_t u8 = ldub_kernel(addr);
    env = savedenv;
    return u8;
}

/**
 * Reads a word from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @returns The data word read.
 * @param env1 CPU environment.
 * @param pvAddr GC Virtual address.
 */
uint16_t read_word(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint16_t u16 = lduw_kernel(addr);
    env = savedenv;
    return u16;
}

/**
 * Reads a dword from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @returns The data dword read.
 * @param env1 CPU environment.
 * @param pvAddr GC Virtual address.
 */
uint32_t read_dword(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t u32 = ldl_kernel(addr);
    env = savedenv;
    return u32;
}

/**
 * Writes a byte to a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @param env1 CPU environment.
 * @param pvAddr GC Virtual address.
 * @param val byte value
 */
void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stb(addr, val);
    env = savedenv;
}

void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stw(addr, val);
    env = savedenv;
}

void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stl(addr, val);
    env = savedenv;
}

/**
 * Correctly loads a selector into a segment register, updating the
 * internal qemu data/caches.
 * @param env1 CPU environment.
 * @param seg_reg Segment register.
 * @param selector Selector to load.
 */
void sync_seg(CPUX86State *env1, int seg_reg, int selector)
{
    CPUX86State *savedenv = env;
    env = env1;

    if ( env->eflags & X86_EFL_VM
        || !(env->cr[0] & X86_CR0_PE))
    {
        load_seg_vm(seg_reg, selector);

        env = savedenv;

        /* Successful sync. */
        env1->segs[seg_reg].newselector = 0;
    }
    else
    {
        if (setjmp(env1->jmp_env) == 0)
        {
            if (seg_reg == R_CS)
            {
                uint32_t e1, e2;
                load_segment(&e1, &e2, selector);
                cpu_x86_load_seg_cache(env, R_CS, selector,
                                       get_seg_base(e1, e2),
                                       get_seg_limit(e1, e2),
                                       e2);
            }
            else
                load_seg(seg_reg, selector);
            env = savedenv;

            /* Successful sync. */
            env1->segs[seg_reg].newselector = 0;
        }
        else
        {
            env = savedenv;

            /* Postpone sync until the guest uses the selector. */
            env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
            env1->segs[seg_reg].newselector = selector;
            Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
        }
    }
}


/**
 * Correctly loads a new ldtr selector.
 *
 * @param env1 CPU environment.
 * @param selector Selector to load.
 */
void sync_ldtr(CPUX86State *env1, int selector)
{
    CPUX86State *saved_env = env;
    target_ulong saved_T0 = T0;
    if (setjmp(env1->jmp_env) == 0)
    {
        env = env1;
        T0 = selector;
        helper_lldt_T0();
        T0 = saved_T0;
        env = saved_env;
    }
    else
    {
        T0 = saved_T0;
        env = saved_env;
#ifdef VBOX_STRICT
        cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
#endif
    }
}

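/* Translates exactly one guest instruction into a scratch translation
   block and executes it, looping while EIP is unchanged (repeated
   instructions). Returns 0 on success, -1 if code generation failed. */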
int emulate_single_instr(CPUX86State *env1)
{
#if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */
    /* This has to be static because it needs to be addressable
       using 32-bit immediate addresses on 64-bit machines. This
       is dictated by the gcc code model used when building this
       module / op.o. Using a static here pushes the problem
       onto the module loader. */
    static TranslationBlock tb_temp;
#endif
    TranslationBlock *tb;
    TranslationBlock *current;
    int csize;
    void (*gen_func)(void);
    uint8_t *tc_ptr;
    target_ulong old_eip;

    /* ensures env is loaded in ebp! */
    CPUX86State *savedenv = env;
    env = env1;

    RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);

#if 1 /* see above */
    tc_ptr = env->pvCodeBuffer;
#else
    tc_ptr = code_gen_ptr;
#endif

    /*
     * Setup temporary translation block.
     */
    /* tb_alloc: */
#if 1 /* see above */
    tb = &tb_temp;
    tb->pc = env->segs[R_CS].base + env->eip;
    tb->cflags = 0;
#else
    tb = tb_alloc(env->segs[R_CS].base + env->eip);
    if (!tb)
    {
        tb_flush(env);
        tb = tb_alloc(env->segs[R_CS].base + env->eip);
    }
#endif

    /* tb_find_slow: */
    tb->tc_ptr = tc_ptr;
    tb->cs_base = env->segs[R_CS].base;
    tb->flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));

    /* Initialize the rest with sensible values. */
    tb->size = 0;
    tb->phys_hash_next = NULL;
    tb->page_next[0] = NULL;
    tb->page_next[1] = NULL;
    tb->page_addr[0] = 0;
    tb->page_addr[1] = 0;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tb->tb_next[0] = 0xffff;
    tb->tb_next[1] = 0xffff;
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    tb->jmp_first = NULL;

    current = env->current_tb;
    env->current_tb = NULL;

    /*
     * Translate only one instruction.
     */
    ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
    if (cpu_gen_code(env, tb, env->cbCodeBuffer, &csize) < 0)
    {
        AssertFailed();
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#ifdef DEBUG
    if(csize > env->cbCodeBuffer)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
    if (tb->tc_ptr != tc_ptr)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#endif
    ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);

    /* tb_link_phys: */
    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
    Assert(tb->jmp_next[0] == NULL); Assert(tb->jmp_next[1] == NULL);
    if (tb->tb_next_offset[0] != 0xffff)
        tb_set_jmp_target(tb, 0, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[0]));
    if (tb->tb_next_offset[1] != 0xffff)
        tb_set_jmp_target(tb, 1, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[1]));

    /*
     * Execute it using emulation
     */
    old_eip = env->eip;
    gen_func = (void *)tb->tc_ptr;
    env->current_tb = tb;

    // eip remains the same for repeated instructions, so loop until it
    // changes; qemu doesn't emit a jump inside the generated code here.
    // Perhaps not a very safe hack.
    while (old_eip == env->eip)
    {
        gen_func();
        /*
         * Exit once we detect an external interrupt and interrupts are enabled
         */
        if (   (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_EXTERNAL_TIMER))
            || (   (env->eflags & IF_MASK)
                && !(env->hflags & HF_INHIBIT_IRQ_MASK)
                && (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)) )
        {
            break;
        }
    }
    env->current_tb = current;

    Assert(tb->phys_hash_next == NULL);
    Assert(tb->page_next[0] == NULL);
    Assert(tb->page_next[1] == NULL);
    Assert(tb->page_addr[0] == 0);
    Assert(tb->page_addr[1] == 0);
    /*
    Assert(tb->tb_next_offset[0] == 0xffff);
    Assert(tb->tb_next_offset[1] == 0xffff);
    Assert(tb->tb_next[0] == 0xffff);
    Assert(tb->tb_next[1] == 0xffff);
    Assert(tb->jmp_next[0] == NULL);
    Assert(tb->jmp_next[1] == NULL);
    Assert(tb->jmp_first == NULL); */

    RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);

    /*
     * Execute the next instruction when we encounter instruction fusing.
     */
    if (env->hflags & HF_INHIBIT_IRQ_MASK)
    {
        Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %RGv\n", env->eip));
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        emulate_single_instr(env);
    }

    env = savedenv;
    return 0;
}

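/* Fetches SS:ESP for the given privilege level from the current TSS.
   Bit 3 of the descriptor type distinguishes a 16-bit from a 32-bit TSS
   layout, which is what the shift accounts for. Returns 1 on success,
   0 if the slot lies outside the TSS limit. */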
int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
                            uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

    CPUX86State *savedenv = env;
    env = env1;

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
    {
        env = savedenv;
        return 0;
    }
    //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);

    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }

    env = savedenv;
    return 1;
}

//*****************************************************************************
// Needs to be at the bottom of the file (overriding macros)

static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
{
    return *(CPU86_LDouble *)ptr;
}

static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
{
    *(CPU86_LDouble *)ptr = f;
}

#undef stw
#undef stl
#undef stq
#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
#define data64 0

//*****************************************************************************
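/* Stores the emulated FPU/SSE state into the FXSAVE-format memory image
   at ptr: FCW at +0, FSW at +2, the abridged tag byte at +4, MXCSR at
   +0x18, ST(0)..ST(7) in 16-byte slots from +0x20 and the XMM registers
   from +0xa0. The non-FXSR fallback writes the legacy FSAVE layout. */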
void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= (env->fptags[i] << i);
        }
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag ^ 0xff);

        addr = ptr + 0x20;
        for(i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, addr);
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it */
            stl(ptr + 0x18, env->mxcsr); /* mxcsr */
            stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for(i = 0; i < nb_xmm_regs; i++) {
#if __GNUC__ < 4
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
                stl(addr, env->xmm_regs[i].XMM_L(0));
                stl(addr + 4, env->xmm_regs[i].XMM_L(1));
                stl(addr + 8, env->xmm_regs[i].XMM_L(2));
                stl(addr + 12, env->xmm_regs[i].XMM_L(3));
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag;

        fp->FCW = env->fpuc;
        fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for (i = 7; i >= 0; i--) {
            fptag <<= 2;
            if (env->fptags[i]) {
                fptag |= 3;
            } else {
                /* the FPU automatically computes it */
            }
        }
        fp->FTW = fptag;

        for(i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
        }
    }
}

//*****************************************************************************
#undef lduw
#undef ldl
#undef ldq
#define lduw(a) *(uint16_t *)(a)
#define ldl(a) *(uint32_t *)(a)
#define ldq(a) *(uint64_t *)(a)
//*****************************************************************************
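/* Counterpart of restore_raw_fp_state above: loads the emulated FPU/SSE
   state back from an FXSAVE/FSAVE-format image at ptr into the CPU state. */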
void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
        env->fpstt = (fpus >> 11) & 7;
        env->fpus = fpus & ~0x3800;
        fptag ^= 0xff;
        for(i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag >> i) & 1);
        }

        addr = ptr + 0x20;
        for(i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(addr);
            ST(i) = tmp;
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it, endianness */
            env->mxcsr = ldl(ptr + 0x18);
            //ldl(ptr + 0x1c);
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for(i = 0; i < nb_xmm_regs; i++) {
#if HC_ARCH_BITS == 32
                /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
                env->xmm_regs[i].XMM_L(0) = ldl(addr);
                env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
                env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
                env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
#else
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag;

        env->fpuc = fp->FCW;
        env->fpstt = (fp->FSW >> 11) & 7;
        env->fpus = fp->FSW & ~0x3800;
        fptag = fp->FTW;
        for(i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag & 3) == 3);
            fptag >>= 2;
        }
        for(i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
            ST(i) = tmp;
        }
    }
}
//*****************************************************************************
//*****************************************************************************

#endif /* VBOX */