VirtualBox

source: vbox/trunk/src/recompiler/target-i386/helper.c@13375

Last change on this file since 13375 was 13375, checked in by vboxsync, 16 years ago

some (disabled) VMI bits

  • Property svn:eol-style set to native
File size: 136.0 KB
 
/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#ifdef VBOX
# include <VBox/err.h>
# include <VBox/parav.h>
#endif
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
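
/* Editor's note (not in the original source): parity_table caches the x86
   parity flag for every byte value; CC_P is set exactly when the byte has
   an even number of 1 bits, since PF covers only the low 8 bits of a
   result. A minimal equivalent computation, as a sketch with a
   hypothetical helper name: */
#if 0
static uint8_t parity_flag(uint8_t v)
{
    v ^= v >> 4;                /* fold the high nibble onto the low nibble */
    v ^= v >> 2;
    v ^= v >> 1;                /* bit 0 is now the XOR of all 8 bits */
    return (v & 1) ? 0 : CC_P;  /* even bit count => PF set */
}
#endif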

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
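
/* Editor's note (not in the original source): RCL rotates through the carry
   flag, i.e. over width+1 bits, so the effective rotate count is the
   5-bit-masked count modulo 17 for 16-bit operands and modulo 9 for 8-bit
   operands; the two tables above simply cache those remainders for counts
   0..31. Equivalent computation, as a sketch: */
#if 0
static int rcl_effective_count(int count, int operand_bits)
{
    /* rclw_table[count & 0x1f] == rcl_effective_count(count, 16)
       rclb_table[count & 0x1f] == rcl_effective_count(count, 8) */
    return (count & 0x1f) % (operand_bits + 1);
}
#endif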

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /* pi */
    0.30102999566398119523L,  /* lg2 */
    0.69314718055994530943L,  /* ln2 */
    1.44269504088896340739L,  /* l2e */
    3.32192809488736234781L,  /* l2t */
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
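
/* Editor's note (not in the original source): a selector packs three fields,
   which load_segment() above picks apart: bits 1:0 are the RPL, bit 2 is the
   table indicator (0 = GDT, 1 = LDT), and bits 15:3 index the table, so
   'selector & ~7' is directly the byte offset of the 8-byte descriptor.
   Sketch with hypothetical names: */
#if 0
static void split_selector(int selector, int *rpl, int *ti, int *byte_offset)
{
    *rpl = selector & 3;           /* requested privilege level */
    *ti = (selector >> 2) & 1;     /* 0: GDT, 1: LDT */
    *byte_offset = selector & ~7;  /* same as (selector >> 3) * 8 */
}
#endif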

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
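
/* Editor's note (not in the original source): the two descriptor words
   scatter the 32-bit base over e1[31:16], e2[7:0] and e2[31:24], and the
   20-bit limit over e1[15:0] and e2[19:16]; with DESC_G_MASK set the limit
   is in 4 KiB units. Worked example: for a flat data segment with
   e1 = 0x0000ffff and e2 = 0x00cf9300, get_seg_base() returns 0 and
   get_seg_limit() returns ((0xfffff << 12) | 0xfff) = 0xffffffff. */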

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

#if defined(VBOX) && defined(DEBUG)
    printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
#endif

    /* if it is a task gate, read the TSS segment and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
#if defined(VBOX) && defined(DEBUG)
        printf("TSS 32 bits switch\n");
        printf("Saving CS=%08X\n", env->segs[R_CS].selector);
#endif
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16-bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if port I/O is allowed by the TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32-bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
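
/* Editor's note (not in the original source): worked example for check_io()
   above. With the I/O map base word at env->tr.base + 0x66 set to, say,
   0x88, a 4-byte access to port 0x3f8 reads the 16-bit word at offset
   0x88 + (0x3f8 >> 3) = 0x107, shifts it right by 0x3f8 & 7 = 0 and tests
   it against mask (1 << 4) - 1 = 0xf; any set bit in that range means #GP.
   Reading two bytes lets a port range that straddles a byte boundary be
   covered by a single check. */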

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
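
/* Editor's note (not in the original source): the PUSHW/PUSHL and POPW/POPL
   macros work on a local stack-pointer copy and mask every access with
   sp_mask, so a 16-bit stack segment wraps within its 64 KiB; the caller
   only commits the result with SET_ESP, which leaves the high bits of ESP
   untouched when sp_mask is 0xffff. Usage sketch (hypothetical function and
   local names): */
#if 0
static void stack_macro_demo(void)
{
    target_ulong ssp = env->segs[R_SS].base;
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);
    target_ulong sp = ESP;
    uint32_t val;

    PUSHL(ssp, sp, sp_mask, 0x12345678);  /* sp -= 4, then store */
    POPL(ssp, sp, sp_mask, val);          /* load, then sp += 4 */
    SET_ESP(sp, sp_mask);                 /* commit masked bits only */
}
#endif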

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

#ifdef VBOX
# ifdef VBOX_WITH_VMI
    if (    intno == 6
        &&  PARAVIsBiosCall(env->pVM, (RTRCPTR)next_eip, env->regs[R_EAX]))
    {
        env->exception_index = EXCP_PARAV_CALL;
        cpu_loop_exit();
    }
# endif
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
#ifdef VBOX /* See page 3-477 of 253666.pdf */
            raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
#else
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
#endif
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
#if defined(VBOX) && defined(DEBUG)
        printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
#endif
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef VBOX

/* check if VME interrupt redirection is enabled in the TSS */
static inline bool is_vme_irq_redirected(int intno)
{
    int io_offset, intredir_offset;
    unsigned char val, mask;

    /* TSS must be a valid 32-bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    /* the virtual interrupt redirection bitmap is located below the I/O bitmap */
    intredir_offset = io_offset - 0x20;

    intredir_offset += (intno >> 3);
    if ((intredir_offset) > env->tr.limit)
        goto fail;

    val = ldub_kernel(env->tr.base + intredir_offset);
    mask = 1 << (unsigned char)(intno & 7);

    /* a set bit means no redirection. */
    if ((val & mask) != 0) {
        return false;
    }
    return true;

fail:
    raise_exception_err(EXCP0D_GPF, 0);
    return true;
}

/* V86 mode software interrupt with CR4.VME=1 */
static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
{
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eflags;
    uint32_t iopl;

    iopl = ((env->eflags >> IOPL_SHIFT) & 3);

    if (!is_vme_irq_redirected(intno))
    {
        if (iopl == 3)
            /* normal protected mode handler call */
            return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
        else
            raise_exception_err(EXCP0D_GPF, 0);
    }

    /* virtual mode idt is at linear address 0 */
    ptr = 0 + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;

    old_eflags = compute_eflags();
    if (iopl < 3)
    {
        /* copy VIF into IF and set IOPL to 3 */
        if (env->eflags & VIF_MASK)
            old_eflags |= IF_MASK;
        else
            old_eflags &= ~IF_MASK;

        old_eflags |= (3 << IOPL_SHIFT);
    }

    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, old_eflags);
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, next_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(TF_MASK | RF_MASK);

    if (iopl < 3)
        env->eflags &= ~VIF_MASK;
    else
        env->eflags &= ~IF_MASK;
}
#endif /* VBOX */

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
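
/* Editor's note (not in the original source): in the 64-bit TSS the stack
   pointers sit at fixed offsets (RSP0..RSP2 at 4, 12 and 20, IST1..IST7 at
   36..84), which is why 'index = 8 * level + 4' above covers both cases:
   levels 0..2 select RSPn, while an IST slot n is fetched by passing
   level = n + 3, as the get_rsp_from_tss(ist + 3) calls below do. */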

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

#ifdef VBOX
    if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
        cpu_loop_exit();
#endif

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#ifdef VBOX
/**
 * Checks and processes external VMM events.
 * Called by op_check_external_event() when any of the flags is set and can be serviced.
 */
void helper_external_event(void)
{
#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
    uintptr_t uESP;
    __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
    AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
#endif
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
        remR3DmaRun(env);
    }
    if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
    {
        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
        remR3TimersRun(env);
    }
}

/* helper for recording call instruction addresses for later scanning */
void helper_record_call(void)
{
    if (    !(env->state & CPU_RAW_RING0)
        &&  (env->cr[0] & CR0_PG_MASK)
        &&  !(env->eflags & X86_EFL_IF))
        remR3RecordCall(env);
}
#endif /* VBOX */

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
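
/* Editor's note (not in the original source): do_interrupt_real() above is
   the classic real-mode IVT walk: entry i is 4 bytes at linear address
   idt.base + i * 4 (base 0 after reset), holding the handler as
   offset:selector. Sketch of just the fetch, with a hypothetical name: */
#if 0
static void read_ivt_entry(int intno, uint32_t *offset, int *selector)
{
    target_ulong ptr = env->idt.base + intno * 4;
    *offset = lduw_kernel(ptr);        /* new IP in the low word */
    *selector = lduw_kernel(ptr + 2);  /* new CS in the high word */
}
#endif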

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
#ifdef VBOX
            /* int xx *, v86 code and VME enabled? */
            if (    (env->eflags & VM_MASK)
                &&  (env->cr[4] & CR4_VME_MASK)
                &&  is_int
                &&  !is_hw
                &&  env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
               )
                do_soft_interrupt_vme(intno, error_code, next_eip);
            else
#endif /* VBOX */
                do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
#if defined(VBOX) && defined(DEBUG)
    NOT_DMIK(Log2(("raise_interrupt: %x %x %x %VGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
#endif
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
#ifdef VBOX
    cpu_abort(env, "do_ssm_enter");
#else /* !VBOX */
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
#endif /* VBOX */
}

void helper_rsm(void)
{
#ifdef VBOX
    cpu_abort(env, "helper_rsm");
#else /* !VBOX */
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif /* !VBOX */
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(A0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
#ifndef VBOX
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
#else /* VBOX */
    remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
#endif /* VBOX */
}

void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}
1928
1929#ifdef TARGET_X86_64
1930void helper_enter64_level(int level, int data64)
1931{
1932 target_ulong esp, ebp;
1933 ebp = EBP;
1934 esp = ESP;
1935
1936 if (data64) {
1937 /* 64 bit */
1938 esp -= 8;
1939 while (--level) {
1940 esp -= 8;
1941 ebp -= 8;
1942 stq(esp, ldq(ebp));
1943 }
1944 esp -= 8;
1945 stq(esp, T1);
1946 } else {
1947 /* 16 bit */
1948 esp -= 2;
1949 while (--level) {
1950 esp -= 2;
1951 ebp -= 2;
1952 stw(esp, lduw(ebp));
1953 }
1954 esp -= 2;
1955 stw(esp, T1);
1956 }
1957}
1958#endif
1959
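/* LLDT: the selector must reference the GDT (TI clear) and point at a
   present LDT descriptor (system type 2). In long mode the descriptor is
   16 bytes, so the upper base dword is read from ptr + 8. A NULL selector
   just invalidates LDTR. */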
1960void helper_lldt_T0(void)
1961{
1962 int selector;
1963 SegmentCache *dt;
1964 uint32_t e1, e2;
1965 int index, entry_limit;
1966 target_ulong ptr;
1967#ifdef VBOX
1968 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n",
1969 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1970#endif
1971
1972 selector = T0 & 0xffff;
1973 if ((selector & 0xfffc) == 0) {
1974 /* XXX: NULL selector case: invalid LDT */
1975 env->ldt.base = 0;
1976 env->ldt.limit = 0;
1977 } else {
1978 if (selector & 0x4)
1979 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1980 dt = &env->gdt;
1981 index = selector & ~7;
1982#ifdef TARGET_X86_64
1983 if (env->hflags & HF_LMA_MASK)
1984 entry_limit = 15;
1985 else
1986#endif
1987 entry_limit = 7;
1988 if ((index + entry_limit) > dt->limit)
1989 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1990 ptr = dt->base + index;
1991 e1 = ldl_kernel(ptr);
1992 e2 = ldl_kernel(ptr + 4);
1993 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1995 if (!(e2 & DESC_P_MASK))
1996 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1997#ifdef TARGET_X86_64
1998 if (env->hflags & HF_LMA_MASK) {
1999 uint32_t e3;
2000 e3 = ldl_kernel(ptr + 8);
2001 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2002 env->ldt.base |= (target_ulong)e3 << 32;
2003 } else
2004#endif
2005 {
2006 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2007 }
2008 }
2009 env->ldt.selector = selector;
2010#ifdef VBOX
2011 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n",
2012 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2013#endif
2014}
2015
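/* LTR: like LLDT, but the descriptor must be an available TSS (type 1 or
   9); it is then marked busy by writing DESC_TSS_BUSY_MASK back into the
   GDT. A NULL selector invalidates TR. */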
2016void helper_ltr_T0(void)
2017{
2018 int selector;
2019 SegmentCache *dt;
2020 uint32_t e1, e2;
2021 int index, type, entry_limit;
2022 target_ulong ptr;
2023
2024#ifdef VBOX
2025 Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2026 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2027 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2028#endif
2029
2030 selector = T0 & 0xffff;
2031 if ((selector & 0xfffc) == 0) {
2032 /* NULL selector case: invalid TR */
2033 env->tr.base = 0;
2034 env->tr.limit = 0;
2035 env->tr.flags = 0;
2036 } else {
2037 if (selector & 0x4)
2038 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2039 dt = &env->gdt;
2040 index = selector & ~7;
2041#ifdef TARGET_X86_64
2042 if (env->hflags & HF_LMA_MASK)
2043 entry_limit = 15;
2044 else
2045#endif
2046 entry_limit = 7;
2047 if ((index + entry_limit) > dt->limit)
2048 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2049 ptr = dt->base + index;
2050 e1 = ldl_kernel(ptr);
2051 e2 = ldl_kernel(ptr + 4);
2052 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2053 if ((e2 & DESC_S_MASK) ||
2054 (type != 1 && type != 9))
2055 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2056 if (!(e2 & DESC_P_MASK))
2057 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2058#ifdef TARGET_X86_64
2059 if (env->hflags & HF_LMA_MASK) {
2060 uint32_t e3;
2061 e3 = ldl_kernel(ptr + 8);
2062 load_seg_cache_raw_dt(&env->tr, e1, e2);
2063 env->tr.base |= (target_ulong)e3 << 32;
2064 } else
2065#endif
2066 {
2067 load_seg_cache_raw_dt(&env->tr, e1, e2);
2068 }
2069 e2 |= DESC_TSS_BUSY_MASK;
2070 stl_kernel(ptr + 4, e2);
2071 }
2072 env->tr.selector = selector;
2073#ifdef VBOX
2074 Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2075 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2076 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2077#endif
2078}
2079
2080/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2081void load_seg(int seg_reg, int selector)
2082{
2083 uint32_t e1, e2;
2084 int cpl, dpl, rpl;
2085 SegmentCache *dt;
2086 int index;
2087 target_ulong ptr;
2088
2089 selector &= 0xffff;
2090 cpl = env->hflags & HF_CPL_MASK;
2091
2092#ifdef VBOX
2093 /* Trying to load a selector with RPL=1 while executing raw ring 0 code? */
2094 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2095 {
2096 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2097 selector = selector & 0xfffc;
2098 }
2099#endif
2100
2101 if ((selector & 0xfffc) == 0) {
2102 /* null selector case */
2103 if (seg_reg == R_SS
2104#ifdef TARGET_X86_64
2105 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2106#endif
2107 )
2108 raise_exception_err(EXCP0D_GPF, 0);
2109 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2110 } else {
2111
2112 if (selector & 0x4)
2113 dt = &env->ldt;
2114 else
2115 dt = &env->gdt;
2116 index = selector & ~7;
2117 if ((index + 7) > dt->limit)
2118 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2119 ptr = dt->base + index;
2120 e1 = ldl_kernel(ptr);
2121 e2 = ldl_kernel(ptr + 4);
2122
2123 if (!(e2 & DESC_S_MASK))
2124 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2125 rpl = selector & 3;
2126 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2127 if (seg_reg == R_SS) {
2128 /* must be writable segment */
2129 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2130 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2131 if (rpl != cpl || dpl != cpl)
2132 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2133 } else {
2134 /* must be readable segment */
2135 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2136 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2137
2138 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2139 /* if not conforming code, test rights */
2140 if (dpl < cpl || dpl < rpl)
2141 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2142 }
2143 }
2144
2145 if (!(e2 & DESC_P_MASK)) {
2146 if (seg_reg == R_SS)
2147 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2148 else
2149 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2150 }
2151
2152 /* set the access bit if not already set */
2153 if (!(e2 & DESC_A_MASK)) {
2154 e2 |= DESC_A_MASK;
2155 stl_kernel(ptr + 4, e2);
2156 }
2157
2158 cpu_x86_load_seg_cache(env, seg_reg, selector,
2159 get_seg_base(e1, e2),
2160 get_seg_limit(e1, e2),
2161 e2);
2162#if 0
2163 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2164 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2165#endif
2166 }
2167}
2168
2169/* protected mode jump */
2170void helper_ljmp_protected_T0_T1(int next_eip_addend)
2171{
2172 int new_cs, gate_cs, type;
2173 uint32_t e1, e2, cpl, dpl, rpl, limit;
2174 target_ulong new_eip, next_eip;
2175
2176 new_cs = T0;
2177 new_eip = T1;
2178 if ((new_cs & 0xfffc) == 0)
2179 raise_exception_err(EXCP0D_GPF, 0);
2180 if (load_segment(&e1, &e2, new_cs) != 0)
2181 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2182 cpl = env->hflags & HF_CPL_MASK;
2183 if (e2 & DESC_S_MASK) {
2184 if (!(e2 & DESC_CS_MASK))
2185 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2186 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2187 if (e2 & DESC_C_MASK) {
2188 /* conforming code segment */
2189 if (dpl > cpl)
2190 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2191 } else {
2192 /* non conforming code segment */
2193 rpl = new_cs & 3;
2194 if (rpl > cpl)
2195 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2196 if (dpl != cpl)
2197 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2198 }
2199 if (!(e2 & DESC_P_MASK))
2200 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2201 limit = get_seg_limit(e1, e2);
2202 if (new_eip > limit &&
2203 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2204 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2205 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2206 get_seg_base(e1, e2), limit, e2);
2207 EIP = new_eip;
2208 } else {
2209 /* jump to call or task gate */
2210 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2211 rpl = new_cs & 3;
2212 cpl = env->hflags & HF_CPL_MASK;
2213 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2214 switch(type) {
2215 case 1: /* 286 TSS */
2216 case 9: /* 386 TSS */
2217 case 5: /* task gate */
2218 if (dpl < cpl || dpl < rpl)
2219 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2220 next_eip = env->eip + next_eip_addend;
2221 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2222 CC_OP = CC_OP_EFLAGS;
2223 break;
2224 case 4: /* 286 call gate */
2225 case 12: /* 386 call gate */
2226 if ((dpl < cpl) || (dpl < rpl))
2227 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2228 if (!(e2 & DESC_P_MASK))
2229 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2230 gate_cs = e1 >> 16;
2231 new_eip = (e1 & 0xffff);
2232 if (type == 12)
2233 new_eip |= (e2 & 0xffff0000);
2234 if (load_segment(&e1, &e2, gate_cs) != 0)
2235 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2236 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2237 /* must be code segment */
2238 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2239 (DESC_S_MASK | DESC_CS_MASK)))
2240 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2241 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2242 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2243 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2244 if (!(e2 & DESC_P_MASK))
2245#ifdef VBOX /* See page 3-514 of 253666.pdf */
2246 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2247#else
2248 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2249#endif
2250 limit = get_seg_limit(e1, e2);
2251 if (new_eip > limit)
2252 raise_exception_err(EXCP0D_GPF, 0);
2253 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2254 get_seg_base(e1, e2), limit, e2);
2255 EIP = new_eip;
2256 break;
2257 default:
2258 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2259 break;
2260 }
2261 }
2262}
2263
2264/* real mode call */
2265void helper_lcall_real_T0_T1(int shift, int next_eip)
2266{
2267 int new_cs, new_eip;
2268 uint32_t esp, esp_mask;
2269 target_ulong ssp;
2270
2271 new_cs = T0;
2272 new_eip = T1;
2273 esp = ESP;
2274 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2275 ssp = env->segs[R_SS].base;
2276 if (shift) {
2277 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2278 PUSHL(ssp, esp, esp_mask, next_eip);
2279 } else {
2280 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2281 PUSHW(ssp, esp, esp_mask, next_eip);
2282 }
2283
2284 SET_ESP(esp, esp_mask);
2285 env->eip = new_eip;
2286 env->segs[R_CS].selector = new_cs;
2287 env->segs[R_CS].base = (new_cs << 4);
2288}
2289
2290/* protected mode call */
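/* Three cases: a TSS or task gate selector triggers a task switch; a call
   gate to a more privileged non-conforming segment switches to the inner
   stack taken from the TSS and copies param_count parameters across; all
   other calls stay on the current stack. */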
2291void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2292{
2293 int new_cs, new_stack, i;
2294 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2295 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2296 uint32_t val, limit, old_sp_mask;
2297 target_ulong ssp, old_ssp, next_eip, new_eip;
2298
2299 new_cs = T0;
2300 new_eip = T1;
2301 next_eip = env->eip + next_eip_addend;
2302#ifdef DEBUG_PCALL
2303 if (loglevel & CPU_LOG_PCALL) {
2304 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2305 new_cs, (uint32_t)new_eip, shift);
2306 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2307 }
2308#endif
2309 if ((new_cs & 0xfffc) == 0)
2310 raise_exception_err(EXCP0D_GPF, 0);
2311 if (load_segment(&e1, &e2, new_cs) != 0)
2312 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2313 cpl = env->hflags & HF_CPL_MASK;
2314#ifdef DEBUG_PCALL
2315 if (loglevel & CPU_LOG_PCALL) {
2316 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2317 }
2318#endif
2319 if (e2 & DESC_S_MASK) {
2320 if (!(e2 & DESC_CS_MASK))
2321 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2322 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2323 if (e2 & DESC_C_MASK) {
2324 /* conforming code segment */
2325 if (dpl > cpl)
2326 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2327 } else {
2328 /* non conforming code segment */
2329 rpl = new_cs & 3;
2330 if (rpl > cpl)
2331 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2332 if (dpl != cpl)
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 }
2335 if (!(e2 & DESC_P_MASK))
2336 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2337
2338#ifdef TARGET_X86_64
2339 /* XXX: check 16/32 bit cases in long mode */
2340 if (shift == 2) {
2341 target_ulong rsp;
2342 /* 64 bit case */
2343 rsp = ESP;
2344 PUSHQ(rsp, env->segs[R_CS].selector);
2345 PUSHQ(rsp, next_eip);
2346 /* from this point, not restartable */
2347 ESP = rsp;
2348 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2349 get_seg_base(e1, e2),
2350 get_seg_limit(e1, e2), e2);
2351 EIP = new_eip;
2352 } else
2353#endif
2354 {
2355 sp = ESP;
2356 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2357 ssp = env->segs[R_SS].base;
2358 if (shift) {
2359 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2360 PUSHL(ssp, sp, sp_mask, next_eip);
2361 } else {
2362 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2363 PUSHW(ssp, sp, sp_mask, next_eip);
2364 }
2365
2366 limit = get_seg_limit(e1, e2);
2367 if (new_eip > limit)
2368 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2369 /* from this point, not restartable */
2370 SET_ESP(sp, sp_mask);
2371 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2372 get_seg_base(e1, e2), limit, e2);
2373 EIP = new_eip;
2374 }
2375 } else {
2376 /* check gate type */
2377 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2378 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2379 rpl = new_cs & 3;
2380 switch(type) {
2381 case 1: /* available 286 TSS */
2382 case 9: /* available 386 TSS */
2383 case 5: /* task gate */
2384 if (dpl < cpl || dpl < rpl)
2385 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2386 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2387 CC_OP = CC_OP_EFLAGS;
2388 return;
2389 case 4: /* 286 call gate */
2390 case 12: /* 386 call gate */
2391 break;
2392 default:
2393 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2394 break;
2395 }
2396 shift = type >> 3;
2397
2398 if (dpl < cpl || dpl < rpl)
2399 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2400 /* check valid bit */
2401 if (!(e2 & DESC_P_MASK))
2402 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2403 selector = e1 >> 16;
2404 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2405 param_count = e2 & 0x1f;
2406 if ((selector & 0xfffc) == 0)
2407 raise_exception_err(EXCP0D_GPF, 0);
2408
2409 if (load_segment(&e1, &e2, selector) != 0)
2410 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2411 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2412 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2413 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2414 if (dpl > cpl)
2415 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2416 if (!(e2 & DESC_P_MASK))
2417 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2418
2419 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2420 /* to inner privilege level */
2421 get_ss_esp_from_tss(&ss, &sp, dpl);
2422#ifdef DEBUG_PCALL
2423 if (loglevel & CPU_LOG_PCALL)
2424 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2425 ss, sp, param_count, ESP);
2426#endif
2427 if ((ss & 0xfffc) == 0)
2428 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2429 if ((ss & 3) != dpl)
2430 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2431 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2432 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2433 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2434 if (ss_dpl != dpl)
2435 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2436 if (!(ss_e2 & DESC_S_MASK) ||
2437 (ss_e2 & DESC_CS_MASK) ||
2438 !(ss_e2 & DESC_W_MASK))
2439 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2440 if (!(ss_e2 & DESC_P_MASK))
2441#ifdef VBOX /* See page 3-99 of 253666.pdf */
2442 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2443#else
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445#endif
2446
2447 // push_size = ((param_count * 2) + 8) << shift;
2448
2449 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2450 old_ssp = env->segs[R_SS].base;
2451
2452 sp_mask = get_sp_mask(ss_e2);
2453 ssp = get_seg_base(ss_e1, ss_e2);
2454 if (shift) {
2455 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2456 PUSHL(ssp, sp, sp_mask, ESP);
2457 for(i = param_count - 1; i >= 0; i--) {
2458 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2459 PUSHL(ssp, sp, sp_mask, val);
2460 }
2461 } else {
2462 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2463 PUSHW(ssp, sp, sp_mask, ESP);
2464 for(i = param_count - 1; i >= 0; i--) {
2465 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2466 PUSHW(ssp, sp, sp_mask, val);
2467 }
2468 }
2469 new_stack = 1;
2470 } else {
2471 /* to same privilege level */
2472 sp = ESP;
2473 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2474 ssp = env->segs[R_SS].base;
2475 // push_size = (4 << shift);
2476 new_stack = 0;
2477 }
2478
2479 if (shift) {
2480 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2481 PUSHL(ssp, sp, sp_mask, next_eip);
2482 } else {
2483 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2484 PUSHW(ssp, sp, sp_mask, next_eip);
2485 }
2486
2487 /* from this point, not restartable */
2488
2489 if (new_stack) {
2490 ss = (ss & ~3) | dpl;
2491 cpu_x86_load_seg_cache(env, R_SS, ss,
2492 ssp,
2493 get_seg_limit(ss_e1, ss_e2),
2494 ss_e2);
2495 }
2496
2497 selector = (selector & ~3) | dpl;
2498 cpu_x86_load_seg_cache(env, R_CS, selector,
2499 get_seg_base(e1, e2),
2500 get_seg_limit(e1, e2),
2501 e2);
2502 cpu_x86_set_cpl(env, dpl);
2503 SET_ESP(sp, sp_mask);
2504 EIP = offset;
2505 }
2506#ifdef USE_KQEMU
2507 if (kqemu_is_ok(env)) {
2508 env->exception_index = -1;
2509 cpu_loop_exit();
2510 }
2511#endif
2512}
2513
2514/* real and vm86 mode iret */
2515void helper_iret_real(int shift)
2516{
2517 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2518 target_ulong ssp;
2519 int eflags_mask;
2520#ifdef VBOX
2521 bool fVME = false;
2522
2523 remR3TrapClear(env->pVM);
2524#endif /* VBOX */
2525
2526 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2527 sp = ESP;
2528 ssp = env->segs[R_SS].base;
2529 if (shift == 1) {
2530 /* 32 bits */
2531 POPL(ssp, sp, sp_mask, new_eip);
2532 POPL(ssp, sp, sp_mask, new_cs);
2533 new_cs &= 0xffff;
2534 POPL(ssp, sp, sp_mask, new_eflags);
2535 } else {
2536 /* 16 bits */
2537 POPW(ssp, sp, sp_mask, new_eip);
2538 POPW(ssp, sp, sp_mask, new_cs);
2539 POPW(ssp, sp, sp_mask, new_eflags);
2540 }
2541#ifdef VBOX
2542 if ( (env->eflags & VM_MASK)
2543 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2544 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2545 {
2546 fVME = true;
2547 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2548 /* if TF will be set -> #GP */
2549 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2550 || (new_eflags & TF_MASK))
2551 raise_exception(EXCP0D_GPF);
2552 }
2553#endif /* VBOX */
2554
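/* With CR4.VME and IOPL < 3 (the fVME case above), IRET must leave IF and
   IOPL untouched; the IF bit of the popped image is reflected into VIF
   instead, see the eflags_mask selection below. */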
2555 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2556 load_seg_vm(R_CS, new_cs);
2557 env->eip = new_eip;
2558#ifdef VBOX
2559 if (fVME)
2560 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2561 else
2562#endif
2563 if (env->eflags & VM_MASK)
2564 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2565 else
2566 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2567 if (shift == 0)
2568 eflags_mask &= 0xffff;
2569 load_eflags(new_eflags, eflags_mask);
2570
2571#ifdef VBOX
2572 if (fVME)
2573 {
2574 if (new_eflags & IF_MASK)
2575 env->eflags |= VIF_MASK;
2576 else
2577 env->eflags &= ~VIF_MASK;
2578 }
2579#endif /* VBOX */
2580}
2581
2582static inline void validate_seg(int seg_reg, int cpl)
2583{
2584 int dpl;
2585 uint32_t e2;
2586
2587 /* XXX: on x86_64, we do not want to nullify FS and GS because
2588 they may still contain a valid base. I would be interested to
2589 know how a real x86_64 CPU behaves */
2590 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2591 (env->segs[seg_reg].selector & 0xfffc) == 0)
2592 return;
2593
2594 e2 = env->segs[seg_reg].flags;
2595 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2596 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2597 /* data or non conforming code segment */
2598 if (dpl < cpl) {
2599 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2600 }
2601 }
2602}
2603
2604/* protected mode iret */
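/* Shared by IRET and far RET: shift selects the operand size (0 = 16-bit,
   1 = 32-bit, 2 = 64-bit pops), is_iret additionally pops EFLAGS, and
   addend releases the immediate operand bytes of a "ret n". */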
2605static inline void helper_ret_protected(int shift, int is_iret, int addend)
2606{
2607 uint32_t new_cs, new_eflags, new_ss;
2608 uint32_t new_es, new_ds, new_fs, new_gs;
2609 uint32_t e1, e2, ss_e1, ss_e2;
2610 int cpl, dpl, rpl, eflags_mask, iopl;
2611 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2612
2613#ifdef TARGET_X86_64
2614 if (shift == 2)
2615 sp_mask = -1;
2616 else
2617#endif
2618 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2619 sp = ESP;
2620 ssp = env->segs[R_SS].base;
2621 new_eflags = 0; /* avoid warning */
2622#ifdef TARGET_X86_64
2623 if (shift == 2) {
2624 POPQ(sp, new_eip);
2625 POPQ(sp, new_cs);
2626 new_cs &= 0xffff;
2627 if (is_iret) {
2628 POPQ(sp, new_eflags);
2629 }
2630 } else
2631#endif
2632 if (shift == 1) {
2633 /* 32 bits */
2634 POPL(ssp, sp, sp_mask, new_eip);
2635 POPL(ssp, sp, sp_mask, new_cs);
2636 new_cs &= 0xffff;
2637 if (is_iret) {
2638 POPL(ssp, sp, sp_mask, new_eflags);
2639#if defined(VBOX) && defined(DEBUG)
2640 printf("iret: new CS %04X\n", new_cs);
2641 printf("iret: new EIP %08X\n", new_eip);
2642 printf("iret: new EFLAGS %08X\n", new_eflags);
2643 printf("iret: EAX=%08x\n", EAX);
2644#endif
2645
2646 if (new_eflags & VM_MASK)
2647 goto return_to_vm86;
2648 }
2649#ifdef VBOX
2650 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2651 {
2652#ifdef DEBUG
2653 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2654#endif
2655 new_cs = new_cs & 0xfffc;
2656 }
2657#endif
2658 } else {
2659 /* 16 bits */
2660 POPW(ssp, sp, sp_mask, new_eip);
2661 POPW(ssp, sp, sp_mask, new_cs);
2662 if (is_iret)
2663 POPW(ssp, sp, sp_mask, new_eflags);
2664 }
2665#ifdef DEBUG_PCALL
2666 if (loglevel & CPU_LOG_PCALL) {
2667 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2668 new_cs, new_eip, shift, addend);
2669 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2670 }
2671#endif
2672 if ((new_cs & 0xfffc) == 0)
2673 {
2674#if defined(VBOX) && defined(DEBUG)
2675 printf("(new_cs & 0xfffc) == 0\n");
2676#endif
2677 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2678 }
2679 if (load_segment(&e1, &e2, new_cs) != 0)
2680 {
2681#if defined(VBOX) && defined(DEBUG)
2682 printf("load_segment failed\n");
2683#endif
2684 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2685 }
2686 if (!(e2 & DESC_S_MASK) ||
2687 !(e2 & DESC_CS_MASK))
2688 {
2689#if defined(VBOX) && defined(DEBUG)
2690 printf("e2 mask %08x\n", e2);
2691#endif
2692 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2693 }
2694 cpl = env->hflags & HF_CPL_MASK;
2695 rpl = new_cs & 3;
2696 if (rpl < cpl)
2697 {
2698#if defined(VBOX) && defined(DEBUG)
2699 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2700#endif
2701 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2702 }
2703 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2704 if (e2 & DESC_C_MASK) {
2705 if (dpl > rpl)
2706 {
2707#if defined(VBOX) && defined(DEBUG)
2708 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2709#endif
2710 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2711 }
2712 } else {
2713 if (dpl != rpl)
2714 {
2715#if defined(VBOX) && defined(DEBUG)
2716 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2717#endif
2718 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2719 }
2720 }
2721 if (!(e2 & DESC_P_MASK))
2722 {
2723#if defined(VBOX) && defined(DEBUG)
2724 printf("DESC_P_MASK e2=%08x\n", e2);
2725#endif
2726 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2727 }
2728 sp += addend;
2729 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2730 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2731 /* return to the same privilege level */
2732 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2733 get_seg_base(e1, e2),
2734 get_seg_limit(e1, e2),
2735 e2);
2736 } else {
2737 /* return to a different privilege level */
2738#ifdef TARGET_X86_64
2739 if (shift == 2) {
2740 POPQ(sp, new_esp);
2741 POPQ(sp, new_ss);
2742 new_ss &= 0xffff;
2743 } else
2744#endif
2745 if (shift == 1) {
2746 /* 32 bits */
2747 POPL(ssp, sp, sp_mask, new_esp);
2748 POPL(ssp, sp, sp_mask, new_ss);
2749 new_ss &= 0xffff;
2750 } else {
2751 /* 16 bits */
2752 POPW(ssp, sp, sp_mask, new_esp);
2753 POPW(ssp, sp, sp_mask, new_ss);
2754 }
2755#ifdef DEBUG_PCALL
2756 if (loglevel & CPU_LOG_PCALL) {
2757 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2758 new_ss, new_esp);
2759 }
2760#endif
2761 if ((new_ss & 0xfffc) == 0) {
2762#ifdef TARGET_X86_64
2763 /* NULL ss is allowed in long mode if cpl != 3 */
2764 /* XXX: test CS64 ? */
2765 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2766 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2767 0, 0xffffffff,
2768 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2769 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2770 DESC_W_MASK | DESC_A_MASK);
2771 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2772 } else
2773#endif
2774 {
2775 raise_exception_err(EXCP0D_GPF, 0);
2776 }
2777 } else {
2778 if ((new_ss & 3) != rpl)
2779 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2780 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2781 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2782 if (!(ss_e2 & DESC_S_MASK) ||
2783 (ss_e2 & DESC_CS_MASK) ||
2784 !(ss_e2 & DESC_W_MASK))
2785 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2786 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2787 if (dpl != rpl)
2788 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2789 if (!(ss_e2 & DESC_P_MASK))
2790 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2791 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2792 get_seg_base(ss_e1, ss_e2),
2793 get_seg_limit(ss_e1, ss_e2),
2794 ss_e2);
2795 }
2796
2797 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2798 get_seg_base(e1, e2),
2799 get_seg_limit(e1, e2),
2800 e2);
2801 cpu_x86_set_cpl(env, rpl);
2802 sp = new_esp;
2803#ifdef TARGET_X86_64
2804 if (env->hflags & HF_CS64_MASK)
2805 sp_mask = -1;
2806 else
2807#endif
2808 sp_mask = get_sp_mask(ss_e2);
2809
2810 /* validate data segments */
2811 validate_seg(R_ES, rpl);
2812 validate_seg(R_DS, rpl);
2813 validate_seg(R_FS, rpl);
2814 validate_seg(R_GS, rpl);
2815
2816 sp += addend;
2817 }
2818 SET_ESP(sp, sp_mask);
2819 env->eip = new_eip;
2820 if (is_iret) {
2821 /* NOTE: 'cpl' is the _old_ CPL */
2822 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2823 if (cpl == 0)
2824#ifdef VBOX
2825 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2826#else
2827 eflags_mask |= IOPL_MASK;
2828#endif
2829 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2830 if (cpl <= iopl)
2831 eflags_mask |= IF_MASK;
2832 if (shift == 0)
2833 eflags_mask &= 0xffff;
2834 load_eflags(new_eflags, eflags_mask);
2835 }
2836 return;
2837
2838 return_to_vm86:
2839
2840#if 0 // defined(VBOX) && defined(DEBUG)
2841 printf("V86: new CS %04X\n", new_cs);
2842 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2843 printf("V86: new EIP %08X\n", new_eip);
2844 printf("V86: new EFLAGS %08X\n", new_eflags);
2845#endif
2846
2847 POPL(ssp, sp, sp_mask, new_esp);
2848 POPL(ssp, sp, sp_mask, new_ss);
2849 POPL(ssp, sp, sp_mask, new_es);
2850 POPL(ssp, sp, sp_mask, new_ds);
2851 POPL(ssp, sp, sp_mask, new_fs);
2852 POPL(ssp, sp, sp_mask, new_gs);
2853
2854 /* modify processor state */
2855 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2856 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2857 load_seg_vm(R_CS, new_cs & 0xffff);
2858 cpu_x86_set_cpl(env, 3);
2859 load_seg_vm(R_SS, new_ss & 0xffff);
2860 load_seg_vm(R_ES, new_es & 0xffff);
2861 load_seg_vm(R_DS, new_ds & 0xffff);
2862 load_seg_vm(R_FS, new_fs & 0xffff);
2863 load_seg_vm(R_GS, new_gs & 0xffff);
2864
2865 env->eip = new_eip & 0xffff;
2866 ESP = new_esp;
2867}
2868
2869void helper_iret_protected(int shift, int next_eip)
2870{
2871 int tss_selector, type;
2872 uint32_t e1, e2;
2873
2874#ifdef VBOX
2875 remR3TrapClear(env->pVM);
2876#endif
2877
2878 /* specific case for TSS */
2879 if (env->eflags & NT_MASK) {
2880#ifdef TARGET_X86_64
2881 if (env->hflags & HF_LMA_MASK)
2882 raise_exception_err(EXCP0D_GPF, 0);
2883#endif
2884 tss_selector = lduw_kernel(env->tr.base + 0);
2885 if (tss_selector & 4)
2886 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2887 if (load_segment(&e1, &e2, tss_selector) != 0)
2888 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2889 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2890 /* NOTE: this single test checks both that it is a system segment and that the TSS is busy */
2891 if (type != 3)
2892 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2893 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2894 } else {
2895 helper_ret_protected(shift, 1, 0);
2896 }
2897#ifdef USE_KQEMU
2898 if (kqemu_is_ok(env)) {
2899 CC_OP = CC_OP_EFLAGS;
2900 env->exception_index = -1;
2901 cpu_loop_exit();
2902 }
2903#endif
2904}
2905
2906void helper_lret_protected(int shift, int addend)
2907{
2908 helper_ret_protected(shift, 0, addend);
2909#ifdef USE_KQEMU
2910 if (kqemu_is_ok(env)) {
2911 env->exception_index = -1;
2912 cpu_loop_exit();
2913 }
2914#endif
2915}
2916
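/* SYSENTER: flat CS and SS are derived from IA32_SYSENTER_CS (CS and
   CS + 8) and execution continues at CPL 0 at SYSENTER_EIP/ESP; a zero
   SYSENTER_CS MSR yields #GP(0). */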
2917void helper_sysenter(void)
2918{
2919 if (env->sysenter_cs == 0) {
2920 raise_exception_err(EXCP0D_GPF, 0);
2921 }
2922 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2923 cpu_x86_set_cpl(env, 0);
2924 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2925 0, 0xffffffff,
2926 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2927 DESC_S_MASK |
2928 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2929 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2930 0, 0xffffffff,
2931 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2932 DESC_S_MASK |
2933 DESC_W_MASK | DESC_A_MASK);
2934 ESP = env->sysenter_esp;
2935 EIP = env->sysenter_eip;
2936}
2937
2938void helper_sysexit(void)
2939{
2940 int cpl;
2941
2942 cpl = env->hflags & HF_CPL_MASK;
2943 if (env->sysenter_cs == 0 || cpl != 0) {
2944 raise_exception_err(EXCP0D_GPF, 0);
2945 }
2946 cpu_x86_set_cpl(env, 3);
2947 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2948 0, 0xffffffff,
2949 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2950 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2951 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2952 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2953 0, 0xffffffff,
2954 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2955 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2956 DESC_W_MASK | DESC_A_MASK);
2957 ESP = ECX;
2958 EIP = EDX;
2959#ifdef USE_KQEMU
2960 if (kqemu_is_ok(env)) {
2961 env->exception_index = -1;
2962 cpu_loop_exit();
2963 }
2964#endif
2965}
2966
2967void helper_movl_crN_T0(int reg)
2968{
2969#if !defined(CONFIG_USER_ONLY)
2970 switch(reg) {
2971 case 0:
2972 cpu_x86_update_cr0(env, T0);
2973 break;
2974 case 3:
2975 cpu_x86_update_cr3(env, T0);
2976 break;
2977 case 4:
2978 cpu_x86_update_cr4(env, T0);
2979 break;
2980 case 8:
2981 cpu_set_apic_tpr(env, T0);
2982 break;
2983 default:
2984 env->cr[reg] = T0;
2985 break;
2986 }
2987#endif
2988}
2989
2990/* XXX: do more */
2991void helper_movl_drN_T0(int reg)
2992{
2993 env->dr[reg] = T0;
2994}
2995
2996void helper_invlpg(target_ulong addr)
2997{
2998 cpu_x86_flush_tlb(env, addr);
2999}
3000
3001void helper_rdtsc(void)
3002{
3003 uint64_t val;
3004
3005 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3006 raise_exception(EXCP0D_GPF);
3007 }
3008 val = cpu_get_tsc(env);
3009 EAX = (uint32_t)(val);
3010 EDX = (uint32_t)(val >> 32);
3011}
3012
3013#if defined(CONFIG_USER_ONLY)
3014void helper_wrmsr(void)
3015{
3016}
3017
3018void helper_rdmsr(void)
3019{
3020}
3021#else
3022void helper_wrmsr(void)
3023{
3024 uint64_t val;
3025
3026 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3027
3028 switch((uint32_t)ECX) {
3029 case MSR_IA32_SYSENTER_CS:
3030 env->sysenter_cs = val & 0xffff;
3031 break;
3032 case MSR_IA32_SYSENTER_ESP:
3033 env->sysenter_esp = val;
3034 break;
3035 case MSR_IA32_SYSENTER_EIP:
3036 env->sysenter_eip = val;
3037 break;
3038 case MSR_IA32_APICBASE:
3039 cpu_set_apic_base(env, val);
3040 break;
3041 case MSR_EFER:
3042 {
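        /* Only EFER bits whose backing feature is advertised in CPUID
           are writable (SCE, LME, FFXSR, NXE); the remaining bits keep
           their previous value instead of faulting. */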
3043 uint64_t update_mask;
3044 update_mask = 0;
3045 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3046 update_mask |= MSR_EFER_SCE;
3047 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3048 update_mask |= MSR_EFER_LME;
3049 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3050 update_mask |= MSR_EFER_FFXSR;
3051 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3052 update_mask |= MSR_EFER_NXE;
3053 env->efer = (env->efer & ~update_mask) |
3054 (val & update_mask);
3055 }
3056 break;
3057 case MSR_STAR:
3058 env->star = val;
3059 break;
3060 case MSR_PAT:
3061 env->pat = val;
3062 break;
3063#ifdef TARGET_X86_64
3064 case MSR_LSTAR:
3065 env->lstar = val;
3066 break;
3067 case MSR_CSTAR:
3068 env->cstar = val;
3069 break;
3070 case MSR_FMASK:
3071 env->fmask = val;
3072 break;
3073 case MSR_FSBASE:
3074 env->segs[R_FS].base = val;
3075 break;
3076 case MSR_GSBASE:
3077 env->segs[R_GS].base = val;
3078 break;
3079 case MSR_KERNELGSBASE:
3080 env->kernelgsbase = val;
3081 break;
3082#endif
3083 default:
3084#ifndef VBOX
3085 /* XXX: exception ? */
3086 break;
3087#else /* VBOX */
3088 {
3089 uint32_t ecx = (uint32_t)ECX;
3090 /* In X2APIC specification this range is reserved for APIC control. */
3091 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3092 cpu_apic_wrmsr(env, ecx, val);
3093 /** @todo else exception? */
3094 break;
3095 }
3096#endif /* VBOX */
3097 }
3098}
3099
3100void helper_rdmsr(void)
3101{
3102 uint64_t val;
3103 switch((uint32_t)ECX) {
3104 case MSR_IA32_SYSENTER_CS:
3105 val = env->sysenter_cs;
3106 break;
3107 case MSR_IA32_SYSENTER_ESP:
3108 val = env->sysenter_esp;
3109 break;
3110 case MSR_IA32_SYSENTER_EIP:
3111 val = env->sysenter_eip;
3112 break;
3113 case MSR_IA32_APICBASE:
3114 val = cpu_get_apic_base(env);
3115 break;
3116 case MSR_EFER:
3117 val = env->efer;
3118 break;
3119 case MSR_STAR:
3120 val = env->star;
3121 break;
3122 case MSR_PAT:
3123 val = env->pat;
3124 break;
3125#ifdef TARGET_X86_64
3126 case MSR_LSTAR:
3127 val = env->lstar;
3128 break;
3129 case MSR_CSTAR:
3130 val = env->cstar;
3131 break;
3132 case MSR_FMASK:
3133 val = env->fmask;
3134 break;
3135 case MSR_FSBASE:
3136 val = env->segs[R_FS].base;
3137 break;
3138 case MSR_GSBASE:
3139 val = env->segs[R_GS].base;
3140 break;
3141 case MSR_KERNELGSBASE:
3142 val = env->kernelgsbase;
3143 break;
3144#endif
3145 default:
3146#ifndef VBOX
3147 /* XXX: exception ? */
3148 val = 0;
3149 break;
3150#else /* VBOX */
3151 {
3152 uint32_t ecx = (uint32_t)ECX;
3153 /* In X2APIC specification this range is reserved for APIC control. */
3154 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3155 val = cpu_apic_rdmsr(env, ecx);
3156 else
3157 val = 0; /** @todo else exception? */
3158 break;
3159 }
3160#endif /* VBOX */
3161 }
3162 EAX = (uint32_t)(val);
3163 EDX = (uint32_t)(val >> 32);
3164}
3165#endif
3166
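/* LSL/LAR/VERR/VERW share one pattern: load the descriptor, check that it
   is visible at the current privilege (conforming code is always visible,
   otherwise DPL >= max(CPL, RPL) is required), then report success or
   failure through the ZF bit stored in CC_SRC. */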
3167void helper_lsl(void)
3168{
3169 unsigned int selector, limit;
3170 uint32_t e1, e2, eflags;
3171 int rpl, dpl, cpl, type;
3172
3173 eflags = cc_table[CC_OP].compute_all();
3174 selector = T0 & 0xffff;
3175 if (load_segment(&e1, &e2, selector) != 0)
3176 goto fail;
3177 rpl = selector & 3;
3178 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3179 cpl = env->hflags & HF_CPL_MASK;
3180 if (e2 & DESC_S_MASK) {
3181 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3182 /* conforming */
3183 } else {
3184 if (dpl < cpl || dpl < rpl)
3185 goto fail;
3186 }
3187 } else {
3188 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3189 switch(type) {
3190 case 1:
3191 case 2:
3192 case 3:
3193 case 9:
3194 case 11:
3195 break;
3196 default:
3197 goto fail;
3198 }
3199 if (dpl < cpl || dpl < rpl) {
3200 fail:
3201 CC_SRC = eflags & ~CC_Z;
3202 return;
3203 }
3204 }
3205 limit = get_seg_limit(e1, e2);
3206 T1 = limit;
3207 CC_SRC = eflags | CC_Z;
3208}
3209
3210void helper_lar(void)
3211{
3212 unsigned int selector;
3213 uint32_t e1, e2, eflags;
3214 int rpl, dpl, cpl, type;
3215
3216 eflags = cc_table[CC_OP].compute_all();
3217 selector = T0 & 0xffff;
3218 if ((selector & 0xfffc) == 0)
3219 goto fail;
3220 if (load_segment(&e1, &e2, selector) != 0)
3221 goto fail;
3222 rpl = selector & 3;
3223 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3224 cpl = env->hflags & HF_CPL_MASK;
3225 if (e2 & DESC_S_MASK) {
3226 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3227 /* conforming */
3228 } else {
3229 if (dpl < cpl || dpl < rpl)
3230 goto fail;
3231 }
3232 } else {
3233 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3234 switch(type) {
3235 case 1:
3236 case 2:
3237 case 3:
3238 case 4:
3239 case 5:
3240 case 9:
3241 case 11:
3242 case 12:
3243 break;
3244 default:
3245 goto fail;
3246 }
3247 if (dpl < cpl || dpl < rpl) {
3248 fail:
3249 CC_SRC = eflags & ~CC_Z;
3250 return;
3251 }
3252 }
3253 T1 = e2 & 0x00f0ff00;
3254 CC_SRC = eflags | CC_Z;
3255}
3256
3257void helper_verr(void)
3258{
3259 unsigned int selector;
3260 uint32_t e1, e2, eflags;
3261 int rpl, dpl, cpl;
3262
3263 eflags = cc_table[CC_OP].compute_all();
3264 selector = T0 & 0xffff;
3265 if ((selector & 0xfffc) == 0)
3266 goto fail;
3267 if (load_segment(&e1, &e2, selector) != 0)
3268 goto fail;
3269 if (!(e2 & DESC_S_MASK))
3270 goto fail;
3271 rpl = selector & 3;
3272 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3273 cpl = env->hflags & HF_CPL_MASK;
3274 if (e2 & DESC_CS_MASK) {
3275 if (!(e2 & DESC_R_MASK))
3276 goto fail;
3277 if (!(e2 & DESC_C_MASK)) {
3278 if (dpl < cpl || dpl < rpl)
3279 goto fail;
3280 }
3281 } else {
3282 if (dpl < cpl || dpl < rpl) {
3283 fail:
3284 CC_SRC = eflags & ~CC_Z;
3285 return;
3286 }
3287 }
3288 CC_SRC = eflags | CC_Z;
3289}
3290
3291void helper_verw(void)
3292{
3293 unsigned int selector;
3294 uint32_t e1, e2, eflags;
3295 int rpl, dpl, cpl;
3296
3297 eflags = cc_table[CC_OP].compute_all();
3298 selector = T0 & 0xffff;
3299 if ((selector & 0xfffc) == 0)
3300 goto fail;
3301 if (load_segment(&e1, &e2, selector) != 0)
3302 goto fail;
3303 if (!(e2 & DESC_S_MASK))
3304 goto fail;
3305 rpl = selector & 3;
3306 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3307 cpl = env->hflags & HF_CPL_MASK;
3308 if (e2 & DESC_CS_MASK) {
3309 goto fail;
3310 } else {
3311 if (dpl < cpl || dpl < rpl)
3312 goto fail;
3313 if (!(e2 & DESC_W_MASK)) {
3314 fail:
3315 CC_SRC = eflags & ~CC_Z;
3316 return;
3317 }
3318 }
3319 CC_SRC = eflags | CC_Z;
3320}
3321
3322/* FPU helpers */
3323
3324void helper_fldt_ST0_A0(void)
3325{
3326 int new_fpstt;
3327 new_fpstt = (env->fpstt - 1) & 7;
3328 env->fpregs[new_fpstt].d = helper_fldt(A0);
3329 env->fpstt = new_fpstt;
3330 env->fptags[new_fpstt] = 0; /* validate stack entry */
3331}
3332
3333void helper_fstt_ST0_A0(void)
3334{
3335 helper_fstt(ST0, A0);
3336}
3337
3338void fpu_set_exception(int mask)
3339{
3340 env->fpus |= mask;
3341 if (env->fpus & (~env->fpuc & FPUC_EM))
3342 env->fpus |= FPUS_SE | FPUS_B;
3343}
3344
3345CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3346{
3347 if (b == 0.0)
3348 fpu_set_exception(FPUS_ZE);
3349 return a / b;
3350}
3351
3352void fpu_raise_exception(void)
3353{
3354 if (env->cr[0] & CR0_NE_MASK) {
3355 raise_exception(EXCP10_COPR);
3356 }
3357#if !defined(CONFIG_USER_ONLY)
3358 else {
3359 cpu_set_ferr(env);
3360 }
3361#endif
3362}
3363
3364/* BCD ops */
3365
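/* FBLD/FBST use the 80-bit packed BCD format: bytes 0..8 hold 18 decimal
   digits (two per byte, low digit in the low nibble) and bit 7 of byte 9
   is the sign. E.g. decimal 1234 is stored as 34 12 00 ... 00. */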
3366void helper_fbld_ST0_A0(void)
3367{
3368 CPU86_LDouble tmp;
3369 uint64_t val;
3370 unsigned int v;
3371 int i;
3372
3373 val = 0;
3374 for(i = 8; i >= 0; i--) {
3375 v = ldub(A0 + i);
3376 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3377 }
3378 tmp = val;
3379 if (ldub(A0 + 9) & 0x80)
3380 tmp = -tmp;
3381 fpush();
3382 ST0 = tmp;
3383}
3384
3385void helper_fbst_ST0_A0(void)
3386{
3387 int v;
3388 target_ulong mem_ref, mem_end;
3389 int64_t val;
3390
3391 val = floatx_to_int64(ST0, &env->fp_status);
3392 mem_ref = A0;
3393 mem_end = mem_ref + 9;
3394 if (val < 0) {
3395 stb(mem_end, 0x80);
3396 val = -val;
3397 } else {
3398 stb(mem_end, 0x00);
3399 }
3400 while (mem_ref < mem_end) {
3401 if (val == 0)
3402 break;
3403 v = val % 100;
3404 val = val / 100;
3405 v = ((v / 10) << 4) | (v % 10);
3406 stb(mem_ref++, v);
3407 }
3408 while (mem_ref < mem_end) {
3409 stb(mem_ref++, 0);
3410 }
3411}
3412
3413void helper_f2xm1(void)
3414{
3415 ST0 = pow(2.0,ST0) - 1.0;
3416}
3417
3418void helper_fyl2x(void)
3419{
3420 CPU86_LDouble fptemp;
3421
3422 fptemp = ST0;
3423 if (fptemp>0.0){
3424 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3425 ST1 *= fptemp;
3426 fpop();
3427 } else {
3428 env->fpus &= (~0x4700);
3429 env->fpus |= 0x400;
3430 }
3431}
3432
3433void helper_fptan(void)
3434{
3435 CPU86_LDouble fptemp;
3436
3437 fptemp = ST0;
3438 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3439 env->fpus |= 0x400;
3440 } else {
3441 ST0 = tan(fptemp);
3442 fpush();
3443 ST0 = 1.0;
3444 env->fpus &= (~0x400); /* C2 <-- 0 */
3445 /* the above code is for |arg| < 2**52 only */
3446 }
3447}
3448
3449void helper_fpatan(void)
3450{
3451 CPU86_LDouble fptemp, fpsrcop;
3452
3453 fpsrcop = ST1;
3454 fptemp = ST0;
3455 ST1 = atan2(fpsrcop,fptemp);
3456 fpop();
3457}
3458
3459void helper_fxtract(void)
3460{
3461 CPU86_LDoubleU temp;
3462 unsigned int expdif;
3463
3464 temp.d = ST0;
3465 expdif = EXPD(temp) - EXPBIAS;
3466 /*DP exponent bias*/
3467 ST0 = expdif;
3468 fpush();
3469 BIASEXPONENT(temp);
3470 ST0 = temp.d;
3471}
3472
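/* FPREM1/FPREM: when the exponent difference is below 53 the remainder is
   computed exactly and the low three quotient bits are reported in
   C0/C1/C3; otherwise C2 is set and only a partial reduction (by roughly
   expdif-50 quotient bits) is performed, as on real hardware. */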
3473void helper_fprem1(void)
3474{
3475 CPU86_LDouble dblq, fpsrcop, fptemp;
3476 CPU86_LDoubleU fpsrcop1, fptemp1;
3477 int expdif;
3478 int q;
3479
3480 fpsrcop = ST0;
3481 fptemp = ST1;
3482 fpsrcop1.d = fpsrcop;
3483 fptemp1.d = fptemp;
3484 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3485 if (expdif < 53) {
3486 dblq = fpsrcop / fptemp;
3487 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3488 ST0 = fpsrcop - fptemp*dblq;
3489 q = (int)dblq; /* cutting off top bits is assumed here */
3490 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3491 /* (C0,C1,C3) <-- (q2,q1,q0) */
3492 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3493 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3494 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3495 } else {
3496 env->fpus |= 0x400; /* C2 <-- 1 */
3497 fptemp = pow(2.0, expdif-50);
3498 fpsrcop = (ST0 / ST1) / fptemp;
3499 /* fpsrcop = integer obtained by rounding to the nearest */
3500 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3501 floor(fpsrcop): ceil(fpsrcop);
3502 ST0 -= (ST1 * fpsrcop * fptemp);
3503 }
3504}
3505
3506void helper_fprem(void)
3507{
3508 CPU86_LDouble dblq, fpsrcop, fptemp;
3509 CPU86_LDoubleU fpsrcop1, fptemp1;
3510 int expdif;
3511 int q;
3512
3513 fpsrcop = ST0;
3514 fptemp = ST1;
3515 fpsrcop1.d = fpsrcop;
3516 fptemp1.d = fptemp;
3517 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3518 if ( expdif < 53 ) {
3519 dblq = fpsrcop / fptemp;
3520 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3521 ST0 = fpsrcop - fptemp*dblq;
3522 q = (int)dblq; /* cutting off top bits is assumed here */
3523 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3524 /* (C0,C1,C3) <-- (q2,q1,q0) */
3525 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3526 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3527 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3528 } else {
3529 env->fpus |= 0x400; /* C2 <-- 1 */
3530 fptemp = pow(2.0, expdif-50);
3531 fpsrcop = (ST0 / ST1) / fptemp;
3532 /* fpsrcop = integer obtained by chopping */
3533 fpsrcop = (fpsrcop < 0.0)?
3534 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3535 ST0 -= (ST1 * fpsrcop * fptemp);
3536 }
3537}
3538
3539void helper_fyl2xp1(void)
3540{
3541 CPU86_LDouble fptemp;
3542
3543 fptemp = ST0;
3544 if ((fptemp+1.0)>0.0) {
3545 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3546 ST1 *= fptemp;
3547 fpop();
3548 } else {
3549 env->fpus &= (~0x4700);
3550 env->fpus |= 0x400;
3551 }
3552}
3553
3554void helper_fsqrt(void)
3555{
3556 CPU86_LDouble fptemp;
3557
3558 fptemp = ST0;
3559 if (fptemp<0.0) {
3560 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3561 env->fpus |= 0x400;
3562 }
3563 ST0 = sqrt(fptemp);
3564}
3565
3566void helper_fsincos(void)
3567{
3568 CPU86_LDouble fptemp;
3569
3570 fptemp = ST0;
3571 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3572 env->fpus |= 0x400;
3573 } else {
3574 ST0 = sin(fptemp);
3575 fpush();
3576 ST0 = cos(fptemp);
3577 env->fpus &= (~0x400); /* C2 <-- 0 */
3578 /* the above code is for |arg| < 2**63 only */
3579 }
3580}
3581
3582void helper_frndint(void)
3583{
3584 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3585}
3586
3587void helper_fscale(void)
3588{
3589 ST0 = ldexp (ST0, (int)(ST1));
3590}
3591
3592void helper_fsin(void)
3593{
3594 CPU86_LDouble fptemp;
3595
3596 fptemp = ST0;
3597 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3598 env->fpus |= 0x400;
3599 } else {
3600 ST0 = sin(fptemp);
3601 env->fpus &= (~0x400); /* C2 <-- 0 */
3602 /* the above code is for |arg| < 2**53 only */
3603 }
3604}
3605
3606void helper_fcos(void)
3607{
3608 CPU86_LDouble fptemp;
3609
3610 fptemp = ST0;
3611 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3612 env->fpus |= 0x400;
3613 } else {
3614 ST0 = cos(fptemp);
3615 env->fpus &= (~0x400); /* C2 <-- 0 */
3616 /* the above code is for |arg| < 2**63 only */
3617 }
3618}
3619
3620void helper_fxam_ST0(void)
3621{
3622 CPU86_LDoubleU temp;
3623 int expdif;
3624
3625 temp.d = ST0;
3626
3627 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3628 if (SIGND(temp))
3629 env->fpus |= 0x200; /* C1 <-- 1 */
3630
3631 /* XXX: test fptags too */
3632 expdif = EXPD(temp);
3633 if (expdif == MAXEXPD) {
3634#ifdef USE_X86LDOUBLE
3635 if (MANTD(temp) == 0x8000000000000000ULL)
3636#else
3637 if (MANTD(temp) == 0)
3638#endif
3639 env->fpus |= 0x500 /*Infinity*/;
3640 else
3641 env->fpus |= 0x100 /*NaN*/;
3642 } else if (expdif == 0) {
3643 if (MANTD(temp) == 0)
3644 env->fpus |= 0x4000 /*Zero*/;
3645 else
3646 env->fpus |= 0x4400 /*Denormal*/;
3647 } else {
3648 env->fpus |= 0x400;
3649 }
3650}
3651
3652void helper_fstenv(target_ulong ptr, int data32)
3653{
3654 int fpus, fptag, exp, i;
3655 uint64_t mant;
3656 CPU86_LDoubleU tmp;
3657
3658 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3659 fptag = 0;
3660 for (i=7; i>=0; i--) {
3661 fptag <<= 2;
3662 if (env->fptags[i]) {
3663 fptag |= 3;
3664 } else {
3665 tmp.d = env->fpregs[i].d;
3666 exp = EXPD(tmp);
3667 mant = MANTD(tmp);
3668 if (exp == 0 && mant == 0) {
3669 /* zero */
3670 fptag |= 1;
3671 } else if (exp == 0 || exp == MAXEXPD
3672#ifdef USE_X86LDOUBLE
3673 || (mant & (1LL << 63)) == 0
3674#endif
3675 ) {
3676 /* NaNs, infinity, denormal */
3677 fptag |= 2;
3678 }
3679 }
3680 }
3681 if (data32) {
3682 /* 32 bit */
3683 stl(ptr, env->fpuc);
3684 stl(ptr + 4, fpus);
3685 stl(ptr + 8, fptag);
3686 stl(ptr + 12, 0); /* fpip */
3687 stl(ptr + 16, 0); /* fpcs */
3688 stl(ptr + 20, 0); /* fpoo */
3689 stl(ptr + 24, 0); /* fpos */
3690 } else {
3691 /* 16 bit */
3692 stw(ptr, env->fpuc);
3693 stw(ptr + 2, fpus);
3694 stw(ptr + 4, fptag);
3695 stw(ptr + 6, 0);
3696 stw(ptr + 8, 0);
3697 stw(ptr + 10, 0);
3698 stw(ptr + 12, 0);
3699 }
3700}
3701
3702void helper_fldenv(target_ulong ptr, int data32)
3703{
3704 int i, fpus, fptag;
3705
3706 if (data32) {
3707 env->fpuc = lduw(ptr);
3708 fpus = lduw(ptr + 4);
3709 fptag = lduw(ptr + 8);
3710 }
3711 else {
3712 env->fpuc = lduw(ptr);
3713 fpus = lduw(ptr + 2);
3714 fptag = lduw(ptr + 4);
3715 }
3716 env->fpstt = (fpus >> 11) & 7;
3717 env->fpus = fpus & ~0x3800;
3718 for(i = 0;i < 8; i++) {
3719 env->fptags[i] = ((fptag & 3) == 3);
3720 fptag >>= 2;
3721 }
3722}
3723
3724void helper_fsave(target_ulong ptr, int data32)
3725{
3726 CPU86_LDouble tmp;
3727 int i;
3728
3729 helper_fstenv(ptr, data32);
3730
3731 ptr += (14 << data32);
3732 for(i = 0;i < 8; i++) {
3733 tmp = ST(i);
3734 helper_fstt(tmp, ptr);
3735 ptr += 10;
3736 }
3737
3738 /* fninit */
3739 env->fpus = 0;
3740 env->fpstt = 0;
3741 env->fpuc = 0x37f;
3742 env->fptags[0] = 1;
3743 env->fptags[1] = 1;
3744 env->fptags[2] = 1;
3745 env->fptags[3] = 1;
3746 env->fptags[4] = 1;
3747 env->fptags[5] = 1;
3748 env->fptags[6] = 1;
3749 env->fptags[7] = 1;
3750}
3751
3752void helper_frstor(target_ulong ptr, int data32)
3753{
3754 CPU86_LDouble tmp;
3755 int i;
3756
3757 helper_fldenv(ptr, data32);
3758 ptr += (14 << data32);
3759
3760 for(i = 0;i < 8; i++) {
3761 tmp = helper_fldt(ptr);
3762 ST(i) = tmp;
3763 ptr += 10;
3764 }
3765}
3766
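/* FXSAVE/FXRSTOR use the 512-byte layout: FCW at +0, FSW at +2, the
   abridged tag word (one bit per register, 1 = valid, i.e. the inverse of
   env->fptags) at +4, MXCSR at +0x18, the ST(i) registers from +0x20 in
   16-byte slots and the XMM registers from +0xa0. */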
3767void helper_fxsave(target_ulong ptr, int data64)
3768{
3769 int fpus, fptag, i, nb_xmm_regs;
3770 CPU86_LDouble tmp;
3771 target_ulong addr;
3772
3773 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3774 fptag = 0;
3775 for(i = 0; i < 8; i++) {
3776 fptag |= (env->fptags[i] << i);
3777 }
3778 stw(ptr, env->fpuc);
3779 stw(ptr + 2, fpus);
3780 stw(ptr + 4, fptag ^ 0xff);
3781
3782 addr = ptr + 0x20;
3783 for(i = 0;i < 8; i++) {
3784 tmp = ST(i);
3785 helper_fstt(tmp, addr);
3786 addr += 16;
3787 }
3788
3789 if (env->cr[4] & CR4_OSFXSR_MASK) {
3790 /* XXX: finish it */
3791 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3792 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3793 nb_xmm_regs = 8 << data64;
3794 addr = ptr + 0xa0;
3795 for(i = 0; i < nb_xmm_regs; i++) {
3796 stq(addr, env->xmm_regs[i].XMM_Q(0));
3797 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3798 addr += 16;
3799 }
3800 }
3801}
3802
3803void helper_fxrstor(target_ulong ptr, int data64)
3804{
3805 int i, fpus, fptag, nb_xmm_regs;
3806 CPU86_LDouble tmp;
3807 target_ulong addr;
3808
3809 env->fpuc = lduw(ptr);
3810 fpus = lduw(ptr + 2);
3811 fptag = lduw(ptr + 4);
3812 env->fpstt = (fpus >> 11) & 7;
3813 env->fpus = fpus & ~0x3800;
3814 fptag ^= 0xff;
3815 for(i = 0;i < 8; i++) {
3816 env->fptags[i] = ((fptag >> i) & 1);
3817 }
3818
3819 addr = ptr + 0x20;
3820 for(i = 0;i < 8; i++) {
3821 tmp = helper_fldt(addr);
3822 ST(i) = tmp;
3823 addr += 16;
3824 }
3825
3826 if (env->cr[4] & CR4_OSFXSR_MASK) {
3827 /* XXX: finish it */
3828 env->mxcsr = ldl(ptr + 0x18);
3829 //ldl(ptr + 0x1c);
3830 nb_xmm_regs = 8 << data64;
3831 addr = ptr + 0xa0;
3832 for(i = 0; i < nb_xmm_regs; i++) {
3833#if !defined(VBOX) || __GNUC__ < 4
3834 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3835 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3836#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3837# if 1
3838 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3839 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3840 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3841 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3842# else
3843 /* this works fine on Mac OS X, gcc 4.0.1 */
3844 uint64_t u64 = ldq(addr);
3845 env->xmm_regs[i].XMM_Q(0) = u64;
3846 u64 = ldq(addr + 8);
3847 env->xmm_regs[i].XMM_Q(1) = u64;
3848# endif
3849#endif
3850 addr += 16;
3851 }
3852 }
3853}
3854
3855#ifndef USE_X86LDOUBLE
3856
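/* Without a native 80-bit long double, convert between the IEEE double
   layout and the x87 80-bit format: shift the 52-bit mantissa up by 11
   bits, make the integer bit explicit and rebias the exponent from the
   double bias (EXPBIAS) to 16383. */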
3857void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3858{
3859 CPU86_LDoubleU temp;
3860 int e;
3861
3862 temp.d = f;
3863 /* mantissa */
3864 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3865 /* exponent + sign */
3866 e = EXPD(temp) - EXPBIAS + 16383;
3867 e |= SIGND(temp) >> 16;
3868 *pexp = e;
3869}
3870
3871CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3872{
3873 CPU86_LDoubleU temp;
3874 int e;
3875 uint64_t ll;
3876
3877 /* XXX: handle overflow ? */
3878 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3879 e |= (upper >> 4) & 0x800; /* sign */
3880 ll = (mant >> 11) & ((1LL << 52) - 1);
3881#ifdef __arm__
3882 temp.l.upper = (e << 20) | (ll >> 32);
3883 temp.l.lower = ll;
3884#else
3885 temp.ll = ll | ((uint64_t)e << 52);
3886#endif
3887 return temp.d;
3888}
3889
3890#else
3891
3892void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3893{
3894 CPU86_LDoubleU temp;
3895
3896 temp.d = f;
3897 *pmant = temp.l.lower;
3898 *pexp = temp.l.upper;
3899}
3900
3901CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3902{
3903 CPU86_LDoubleU temp;
3904
3905 temp.l.upper = upper;
3906 temp.l.lower = mant;
3907 return temp.d;
3908}
3909#endif
3910
3911#ifdef TARGET_X86_64
3912
3913//#define DEBUG_MULDIV
3914
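/* 128-bit helpers for the 64-bit MUL/IMUL/DIV/IDIV: mul64() accumulates
   four 32x32->64 partial products,
       a*b = a1*b1*2^64 + (a0*b1 + a1*b0)*2^32 + a0*b0,
   and div64() is a plain bit-by-bit restoring division. */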
3915static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3916{
3917 *plow += a;
3918 /* carry test */
3919 if (*plow < a)
3920 (*phigh)++;
3921 *phigh += b;
3922}
3923
3924static void neg128(uint64_t *plow, uint64_t *phigh)
3925{
3926 *plow = ~ *plow;
3927 *phigh = ~ *phigh;
3928 add128(plow, phigh, 1, 0);
3929}
3930
3931static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3932{
3933 uint32_t a0, a1, b0, b1;
3934 uint64_t v;
3935
3936 a0 = a;
3937 a1 = a >> 32;
3938
3939 b0 = b;
3940 b1 = b >> 32;
3941
3942 v = (uint64_t)a0 * (uint64_t)b0;
3943 *plow = v;
3944 *phigh = 0;
3945
3946 v = (uint64_t)a0 * (uint64_t)b1;
3947 add128(plow, phigh, v << 32, v >> 32);
3948
3949 v = (uint64_t)a1 * (uint64_t)b0;
3950 add128(plow, phigh, v << 32, v >> 32);
3951
3952 v = (uint64_t)a1 * (uint64_t)b1;
3953 *phigh += v;
3954#ifdef DEBUG_MULDIV
3955 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3956 a, b, *phigh, *plow);
3957#endif
3958}
3959
3960static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3961{
3962 int sa, sb;
3963 sa = (a < 0);
3964 if (sa)
3965 a = -a;
3966 sb = (b < 0);
3967 if (sb)
3968 b = -b;
3969 mul64(plow, phigh, a, b);
3970 if (sa ^ sb) {
3971 neg128(plow, phigh);
3972 }
3973}
3974
3975/* return TRUE if overflow */
3976static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3977{
3978 uint64_t q, r, a1, a0;
3979 int i, qb, ab;
3980
3981 a0 = *plow;
3982 a1 = *phigh;
3983 if (a1 == 0) {
3984 q = a0 / b;
3985 r = a0 % b;
3986 *plow = q;
3987 *phigh = r;
3988 } else {
3989 if (a1 >= b)
3990 return 1;
3991 /* XXX: use a better algorithm */
3992 for(i = 0; i < 64; i++) {
3993 ab = a1 >> 63;
3994 a1 = (a1 << 1) | (a0 >> 63);
3995 if (ab || a1 >= b) {
3996 a1 -= b;
3997 qb = 1;
3998 } else {
3999 qb = 0;
4000 }
4001 a0 = (a0 << 1) | qb;
4002 }
4003#if defined(DEBUG_MULDIV)
4004 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4005 *phigh, *plow, b, a0, a1);
4006#endif
4007 *plow = a0;
4008 *phigh = a1;
4009 }
4010 return 0;
4011}
4012
4013/* return TRUE if overflow */
4014static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4015{
4016 int sa, sb;
4017 sa = ((int64_t)*phigh < 0);
4018 if (sa)
4019 neg128(plow, phigh);
4020 sb = (b < 0);
4021 if (sb)
4022 b = -b;
4023 if (div64(plow, phigh, b) != 0)
4024 return 1;
4025 if (sa ^ sb) {
4026 if (*plow > (1ULL << 63))
4027 return 1;
4028 *plow = - *plow;
4029 } else {
4030 if (*plow >= (1ULL << 63))
4031 return 1;
4032 }
4033 if (sa)
4034 *phigh = - *phigh;
4035 return 0;
4036}
4037
4038void helper_mulq_EAX_T0(void)
4039{
4040 uint64_t r0, r1;
4041
4042 mul64(&r0, &r1, EAX, T0);
4043 EAX = r0;
4044 EDX = r1;
4045 CC_DST = r0;
4046 CC_SRC = r1;
4047}
4048
4049void helper_imulq_EAX_T0(void)
4050{
4051 uint64_t r0, r1;
4052
4053 imul64(&r0, &r1, EAX, T0);
4054 EAX = r0;
4055 EDX = r1;
4056 CC_DST = r0;
4057 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4058}
4059
4060void helper_imulq_T0_T1(void)
4061{
4062 uint64_t r0, r1;
4063
4064 imul64(&r0, &r1, T0, T1);
4065 T0 = r0;
4066 CC_DST = r0;
4067 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4068}
4069
4070void helper_divq_EAX_T0(void)
4071{
4072 uint64_t r0, r1;
4073 if (T0 == 0) {
4074 raise_exception(EXCP00_DIVZ);
4075 }
4076 r0 = EAX;
4077 r1 = EDX;
4078 if (div64(&r0, &r1, T0))
4079 raise_exception(EXCP00_DIVZ);
4080 EAX = r0;
4081 EDX = r1;
4082}
4083
4084void helper_idivq_EAX_T0(void)
4085{
4086 uint64_t r0, r1;
4087 if (T0 == 0) {
4088 raise_exception(EXCP00_DIVZ);
4089 }
4090 r0 = EAX;
4091 r1 = EDX;
4092 if (idiv64(&r0, &r1, T0))
4093 raise_exception(EXCP00_DIVZ);
4094 EAX = r0;
4095 EDX = r1;
4096}
4097
4098void helper_bswapq_T0(void)
4099{
4100 T0 = bswap64(T0);
4101}
4102#endif
4103
4104void helper_hlt(void)
4105{
4106 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4107 env->hflags |= HF_HALTED_MASK;
4108 env->exception_index = EXCP_HLT;
4109 cpu_loop_exit();
4110}
4111
4112void helper_monitor(void)
4113{
4114 if ((uint32_t)ECX != 0)
4115 raise_exception(EXCP0D_GPF);
4116 /* XXX: store address ? */
4117}
4118
4119void helper_mwait(void)
4120{
4121 if ((uint32_t)ECX != 0)
4122 raise_exception(EXCP0D_GPF);
4123#ifdef VBOX
4124 helper_hlt();
4125#else
4126 /* XXX: not complete but not completely erroneous */
4127 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4128 /* more than one CPU: do not sleep because another CPU may
4129 wake this one */
4130 } else {
4131 helper_hlt();
4132 }
4133#endif
4134}
4135
4136float approx_rsqrt(float a)
4137{
4138 return 1.0 / sqrt(a);
4139}
4140
4141float approx_rcp(float a)
4142{
4143 return 1.0 / a;
4144}
4145
4146void update_fp_status(void)
4147{
4148 int rnd_type;
4149
4150 /* set rounding mode */
4151 switch(env->fpuc & RC_MASK) {
4152 default:
4153 case RC_NEAR:
4154 rnd_type = float_round_nearest_even;
4155 break;
4156 case RC_DOWN:
4157 rnd_type = float_round_down;
4158 break;
4159 case RC_UP:
4160 rnd_type = float_round_up;
4161 break;
4162 case RC_CHOP:
4163 rnd_type = float_round_to_zero;
4164 break;
4165 }
4166 set_float_rounding_mode(rnd_type, &env->fp_status);
4167#ifdef FLOATX80
4168 switch((env->fpuc >> 8) & 3) {
4169 case 0:
4170 rnd_type = 32;
4171 break;
4172 case 2:
4173 rnd_type = 64;
4174 break;
4175 case 3:
4176 default:
4177 rnd_type = 80;
4178 break;
4179 }
4180 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4181#endif
4182}
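/*
 * Illustrative sketch, not part of the emulator: the x87 control word packs
 * the rounding control in bits 11:10 and the precision control in bits 9:8,
 * which is what update_fp_status() decodes. For instance, the power-on
 * default FCW of 0x037f selects round-to-nearest and 80-bit precision.
 * A hypothetical check, assuming <assert.h> and RC_MASK == 0x0c00:
 */
#if 0
static void fcw_default_example(void)
{
    const uint16_t fcw = 0x037f;       /* x87 power-on default */
    assert((fcw & 0x0c00) == 0);       /* RC = 00b -> round to nearest even */
    assert(((fcw >> 8) & 3) == 3);     /* PC = 11b -> 80-bit extended precision */
}
#endif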
4183
4184#if !defined(CONFIG_USER_ONLY)
4185
4186#define MMUSUFFIX _mmu
4187#define GETPC() (__builtin_return_address(0))
4188
4189#define SHIFT 0
4190#include "softmmu_template.h"
4191
4192#define SHIFT 1
4193#include "softmmu_template.h"
4194
4195#define SHIFT 2
4196#include "softmmu_template.h"
4197
4198#define SHIFT 3
4199#include "softmmu_template.h"
4200
4201#endif
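/*
 * Note: softmmu_template.h expands once per access size; SHIFT 0/1/2/3
 * generate the 1/2/4/8-byte slow-path load/store helpers (e.g. __ldb_mmu
 * and __stb_mmu for SHIFT 0), and GETPC() supplies the return address used
 * to restore the CPU state when such an access faults.
 */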
4202
4203/* Try to fill the TLB and raise an exception on error. If retaddr is
4204 NULL, the function was called from C code (i.e. not from generated
4205 code or from helper.c). */
4206/* XXX: fix it to restore all registers */
4207void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
4208{
4209 TranslationBlock *tb;
4210 int ret;
4211 unsigned long pc;
4212 CPUX86State *saved_env;
4213
4214 /* XXX: hack to restore env in all cases, even if not called from
4215 generated code */
4216 saved_env = env;
4217 env = cpu_single_env;
4218
4219 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
4220 if (ret) {
4221 if (retaddr) {
4222 /* now we have a real cpu fault */
4223 pc = (unsigned long)retaddr;
4224 tb = tb_find_pc(pc);
4225 if (tb) {
4226 /* the PC is inside the translated code. It means that we have
4227 a virtual CPU fault */
4228 cpu_restore_state(tb, env, pc, NULL);
4229 }
4230 }
4231 if (retaddr)
4232 raise_exception_err(env->exception_index, env->error_code);
4233 else
4234 raise_exception_err_norestore(env->exception_index, env->error_code);
4235 }
4236 env = saved_env;
4237}
4238
4239#ifdef VBOX
4240
4241/**
4242 * Correctly computes the eflags.
4243 * @returns eflags.
4244 * @param env1 CPU environment.
4245 */
4246uint32_t raw_compute_eflags(CPUX86State *env1)
4247{
4248 CPUX86State *savedenv = env;
4249 env = env1;
4250 uint32_t efl = compute_eflags();
4251 env = savedenv;
4252 return efl;
4253}
4254
4255/**
4256 * Reads a byte from a virtual address in the guest memory area.
4257 * XXX: does this work for all addresses? swapped-out pages?
4258 * @returns The data byte read.
4259 * @param env1 CPU environment.
4260 * @param addr GC virtual address.
4261 */
4262uint8_t read_byte(CPUX86State *env1, target_ulong addr)
4263{
4264 CPUX86State *savedenv = env;
4265 env = env1;
4266 uint8_t u8 = ldub_kernel(addr);
4267 env = savedenv;
4268 return u8;
4269}
4270
4271/**
4272 * Reads a 16-bit word from a virtual address in the guest memory area.
4273 * XXX: does this work for all addresses? swapped-out pages?
4274 * @returns The word read.
4275 * @param env1 CPU environment.
4276 * @param addr GC virtual address.
4277 */
4278uint16_t read_word(CPUX86State *env1, target_ulong addr)
4279{
4280 CPUX86State *savedenv = env;
4281 env = env1;
4282 uint16_t u16 = lduw_kernel(addr);
4283 env = savedenv;
4284 return u16;
4285}
4286
4287/**
4288 * Reads a 32-bit dword from a virtual address in the guest memory area.
4289 * XXX: does this work for all addresses? swapped-out pages?
4290 * @returns The dword read.
4291 * @param env1 CPU environment.
4292 * @param addr GC virtual address.
4293 */
4294uint32_t read_dword(CPUX86State *env1, target_ulong addr)
4295{
4296 CPUX86State *savedenv = env;
4297 env = env1;
4298 uint32_t u32 = ldl_kernel(addr);
4299 env = savedenv;
4300 return u32;
4301}
4302
4303/**
4304 * Writes a byte to a virtual address in the guest memory area.
4305 * XXX: does this work for all addresses? swapped-out pages?
4306 * @param env1 CPU environment.
4307 * @param addr GC virtual address.
4308 * @param val The byte value to write.
4310 */
4311void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
4312{
4313 CPUX86State *savedenv = env;
4314 env = env1;
4315 stb(addr, val);
4316 env = savedenv;
4317}
4318
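/**
 * Writes a 16-bit word to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val The word value to write.
 */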
4319void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
4320{
4321 CPUX86State *savedenv = env;
4322 env = env1;
4323 stw(addr, val);
4324 env = savedenv;
4325}
4326
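/**
 * Writes a 32-bit dword to a virtual address in the guest memory area.
 * @param env1 CPU environment.
 * @param addr GC virtual address.
 * @param val The dword value to write.
 */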
4327void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
4328{
4329 CPUX86State *savedenv = env;
4330 env = env1;
4331 stl(addr, val);
4332 env = savedenv;
4333}
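/*
 * All of the accessors above follow the same save/switch/restore pattern
 * around the global 'env'. A hypothetical generator macro (illustrative
 * only; the explicit functions above are what is actually compiled) makes
 * the pattern explicit:
 */
#if 0
#define REM_GUEST_READER(name, type, loader)                    \
    type name(CPUX86State *env1, target_ulong addr)             \
    {                                                           \
        CPUX86State *savedenv = env; /* save the global env */  \
        env = env1;              /* switch to the caller's CPU */ \
        type val = loader(addr); /* softmmu guest access */     \
        env = savedenv;          /* restore the global env */   \
        return val;                                             \
    }

REM_GUEST_READER(read_byte_alt,  uint8_t,  ldub_kernel)
REM_GUEST_READER(read_word_alt,  uint16_t, lduw_kernel)
REM_GUEST_READER(read_dword_alt, uint32_t, ldl_kernel)
#endif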
4334
4335/**
4336 * Correctly loads a selector into a segment register, updating qemu's
4337 * internal data/caches.
4338 * @param env1 CPU environment.
4339 * @param seg_reg Segment register.
4340 * @param selector Selector to load.
4341 */
4342void sync_seg(CPUX86State *env1, int seg_reg, int selector)
4343{
4344 CPUX86State *savedenv = env;
4345 env = env1;
4346
4347 if ( env->eflags & X86_EFL_VM
4348 || !(env->cr[0] & X86_CR0_PE))
4349 {
4350 load_seg_vm(seg_reg, selector);
4351
4352 env = savedenv;
4353
4354 /* Successful sync. */
4355 env1->segs[seg_reg].newselector = 0;
4356 }
4357 else
4358 {
4359 if (setjmp(env1->jmp_env) == 0)
4360 {
4361 if (seg_reg == R_CS)
4362 {
4363 uint32_t e1, e2;
4364 load_segment(&e1, &e2, selector);
4365 cpu_x86_load_seg_cache(env, R_CS, selector,
4366 get_seg_base(e1, e2),
4367 get_seg_limit(e1, e2),
4368 e2);
4369 }
4370 else
4371 load_seg(seg_reg, selector);
4372 env = savedenv;
4373
4374 /* Successful sync. */
4375 env1->segs[seg_reg].newselector = 0;
4376 }
4377 else
4378 {
4379 env = savedenv;
4380
4381 /* Postpone sync until the guest uses the selector. */
4382 env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
4383 env1->segs[seg_reg].newselector = selector;
4384 Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
4385 }
4386 }
4387
4388}
4389
4390
4391/**
4392 * Correctly loads a new ldtr selector.
4393 *
4394 * @param env1 CPU environment.
4395 * @param selector Selector to load.
4396 */
4397void sync_ldtr(CPUX86State *env1, int selector)
4398{
4399 CPUX86State *saved_env = env;
4400 target_ulong saved_T0 = T0;
4401 if (setjmp(env1->jmp_env) == 0)
4402 {
4403 env = env1;
4404 T0 = selector;
4405 helper_lldt_T0();
4406 T0 = saved_T0;
4407 env = saved_env;
4408 }
4409 else
4410 {
4411 T0 = saved_T0;
4412 env = saved_env;
4413#ifdef VBOX_STRICT
4414 cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
4415#endif
4416 }
4417}
4418
4419/**
4420 * Correctly loads a new tr selector.
4421 *
4422 * @param env1 CPU environment.
4423 * @param selector Selector to load.
4424 */
4425int sync_tr(CPUX86State *env1, int selector)
4426{
4427 /* ARG! This was going to call helper_ltr_T0, but that won't work because the TSS descriptor is already marked busy. */
4428 SegmentCache *dt;
4429 uint32_t e1, e2;
4430 int index, type, entry_limit;
4431 target_ulong ptr;
4432 CPUX86State *saved_env = env;
4433 env = env1;
4434
4435 selector &= 0xffff;
4436 if ((selector & 0xfffc) == 0) {
4437 /* NULL selector case: invalid TR */
4438 env->tr.base = 0;
4439 env->tr.limit = 0;
4440 env->tr.flags = 0;
4441 } else {
4442 if (selector & 0x4)
4443 goto l_failure;
4444 dt = &env->gdt;
4445 index = selector & ~7;
4446#ifdef TARGET_X86_64
4447 if (env->hflags & HF_LMA_MASK)
4448 entry_limit = 15;
4449 else
4450#endif
4451 entry_limit = 7;
4452 if ((index + entry_limit) > dt->limit)
4453 goto l_failure;
4454 ptr = dt->base + index;
4455 e1 = ldl_kernel(ptr);
4456 e2 = ldl_kernel(ptr + 4);
4457 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
4458 if ((e2 & DESC_S_MASK) /*||
4459 (type != 1 && type != 9)*/)
4460 goto l_failure;
4461 if (!(e2 & DESC_P_MASK))
4462 goto l_failure;
4463#ifdef TARGET_X86_64
4464 if (env->hflags & HF_LMA_MASK) {
4465 uint32_t e3;
4466 e3 = ldl_kernel(ptr + 8);
4467 load_seg_cache_raw_dt(&env->tr, e1, e2);
4468 env->tr.base |= (target_ulong)e3 << 32;
4469 } else
4470#endif
4471 {
4472 load_seg_cache_raw_dt(&env->tr, e1, e2);
4473 }
4474 e2 |= DESC_TSS_BUSY_MASK;
4475 stl_kernel(ptr + 4, e2);
4476 }
4477 env->tr.selector = selector;
4478
4479 env = saved_env;
4480 return 0;
4481l_failure:
    env = saved_env; /* restore the global env pointer on the failure path too */
4482 AssertMsgFailed(("selector=%d\n", selector));
4483 return -1;
4484}
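/*
 * For reference: sync_tr() reads the raw descriptor words e1/e2 and hands
 * them to load_seg_cache_raw_dt(). A hedged sketch of how base and limit
 * are scattered across those two words in an x86 segment descriptor
 * (mirroring what qemu's get_seg_base()/get_seg_limit() compute, assuming
 * the granularity bit is e2 bit 23):
 */
#if 0
static uint32_t desc_base_sketch(uint32_t e1, uint32_t e2)
{
    /* base 15:0 in e1[31:16], base 23:16 in e2[7:0], base 31:24 in e2[31:24] */
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static uint32_t desc_limit_sketch(uint32_t e1, uint32_t e2)
{
    /* limit 15:0 in e1[15:0], limit 19:16 in e2[19:16] */
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1 << 23)) /* granularity: scale by 4K pages */
        limit = (limit << 12) | 0xfff;
    return limit;
}
#endif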
4485
4486int emulate_single_instr(CPUX86State *env1)
4487{
4488#if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */
4489 /* This has to be static because it needs to be addressable
4490 using 32-bit immediate addresses on 64-bit machines. This
4491 is dictated by the gcc code model used when building this
4492 module / op.o. Using a static here pushes the problem
4493 onto the module loader. */
4494 static TranslationBlock tb_temp;
4495#endif
4496 TranslationBlock *tb;
4497 TranslationBlock *current;
4498 int csize;
4499 void (*gen_func)(void);
4500 uint8_t *tc_ptr;
4501 target_ulong old_eip;
4502
4503 /* ensures env is loaded in ebp! */
4504 CPUX86State *savedenv = env;
4505 env = env1;
4506
4507 RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);
4508
4509#if 1 /* see above */
4510 tc_ptr = env->pvCodeBuffer;
4511#else
4512 tc_ptr = code_gen_ptr;
4513#endif
4514
4515 /*
4516 * Setup temporary translation block.
4517 */
4518 /* tb_alloc: */
4519#if 1 /* see above */
4520 tb = &tb_temp;
4521 tb->pc = env->segs[R_CS].base + env->eip;
4522 tb->cflags = 0;
4523#else
4524 tb = tb_alloc(env->segs[R_CS].base + env->eip);
4525 if (!tb)
4526 {
4527 tb_flush(env);
4528 tb = tb_alloc(env->segs[R_CS].base + env->eip);
4529 }
4530#endif
4531
4532 /* tb_find_slow: */
4533 tb->tc_ptr = tc_ptr;
4534 tb->cs_base = env->segs[R_CS].base;
4535 tb->flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
4536
4537 /* Initialize the rest with sensible values. */
4538 tb->size = 0;
4539 tb->phys_hash_next = NULL;
4540 tb->page_next[0] = NULL;
4541 tb->page_next[1] = NULL;
4542 tb->page_addr[0] = 0;
4543 tb->page_addr[1] = 0;
4544 tb->tb_next_offset[0] = 0xffff;
4545 tb->tb_next_offset[1] = 0xffff;
4546 tb->tb_next[0] = 0xffff;
4547 tb->tb_next[1] = 0xffff;
4548 tb->jmp_next[0] = NULL;
4549 tb->jmp_next[1] = NULL;
4550 tb->jmp_first = NULL;
4551
4552 current = env->current_tb;
4553 env->current_tb = NULL;
4554
4555 /*
4556 * Translate only one instruction.
4557 */
4558 ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
4559 if (cpu_gen_code(env, tb, env->cbCodeBuffer, &csize) < 0)
4560 {
4561 AssertFailed();
4562 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4563 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4564 env = savedenv;
4565 return -1;
4566 }
4567#ifdef DEBUG
4568 if(csize > env->cbCodeBuffer)
4569 {
4570 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4571 AssertFailed();
4572 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4573 env = savedenv;
4574 return -1;
4575 }
4576 if (tb->tc_ptr != tc_ptr)
4577 {
4578 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4579 AssertFailed();
4580 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4581 env = savedenv;
4582 return -1;
4583 }
4584#endif
4585 ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
4586
4587 /* tb_link_phys: */
4588 tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
4589 Assert(tb->jmp_next[0] == NULL); Assert(tb->jmp_next[1] == NULL);
4590 if (tb->tb_next_offset[0] != 0xffff)
4591 tb_set_jmp_target(tb, 0, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[0]));
4592 if (tb->tb_next_offset[1] != 0xffff)
4593 tb_set_jmp_target(tb, 1, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[1]));
4594
4595 /*
4596 * Execute it using emulation
4597 */
4598 old_eip = env->eip;
4599 gen_func = (void *)tb->tc_ptr;
4600 env->current_tb = tb;
4601
4602 // EIP remains the same for repeated (REP-prefixed) instructions; it is unclear why qemu doesn't jump back inside the generated code.
4603 // Looping here until EIP changes is perhaps not a very safe hack.
4604 while(old_eip == env->eip)
4605 {
4606 gen_func();
4607 /*
4608 * Exit once we detect an external interrupt and interrupts are enabled
4609 */
4610 if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
4611 ( (env->eflags & IF_MASK) &&
4612 !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
4613 (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
4614 {
4615 break;
4616 }
4617 }
4618 env->current_tb = current;
4619
4620 Assert(tb->phys_hash_next == NULL);
4621 Assert(tb->page_next[0] == NULL);
4622 Assert(tb->page_next[1] == NULL);
4623 Assert(tb->page_addr[0] == 0);
4624 Assert(tb->page_addr[1] == 0);
4625/*
4626 Assert(tb->tb_next_offset[0] == 0xffff);
4627 Assert(tb->tb_next_offset[1] == 0xffff);
4628 Assert(tb->tb_next[0] == 0xffff);
4629 Assert(tb->tb_next[1] == 0xffff);
4630 Assert(tb->jmp_next[0] == NULL);
4631 Assert(tb->jmp_next[1] == NULL);
4632 Assert(tb->jmp_first == NULL); */
4633
4634 RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
4635
4636 /*
4637 * Execute the next instruction when we encounter instruction fusing.
4638 */
4639 if (env->hflags & HF_INHIBIT_IRQ_MASK)
4640 {
4641 Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %VGv\n", env->eip));
4642 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4643 emulate_single_instr(env);
4644 }
4645
4646 env = savedenv;
4647 return 0;
4648}
4649
4650int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
4651 uint32_t *esp_ptr, int dpl)
4652{
4653 int type, index, shift;
4654
4655 CPUX86State *savedenv = env;
4656 env = env1;
4657
4658 if (!(env->tr.flags & DESC_P_MASK))
4659 cpu_abort(env, "invalid tss");
4660 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
4661 if ((type & 7) != 1)
4662 cpu_abort(env, "invalid tss type %d", type);
4663 shift = type >> 3;
4664 index = (dpl * 4 + 2) << shift;
4665 if (index + (4 << shift) - 1 > env->tr.limit)
4666 {
4667 env = savedenv;
4668 return 0;
4669 }
4670 //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
4671
4672 if (shift == 0) {
4673 *esp_ptr = lduw_kernel(env->tr.base + index);
4674 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
4675 } else {
4676 *esp_ptr = ldl_kernel(env->tr.base + index);
4677 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
4678 }
4679
4680 env = savedenv;
4681 return 1;
4682}
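/*
 * For reference: in a 32-bit TSS (shift == 1) the ring-N stack fields live
 * at offset (dpl * 8 + 4): ESP0 at +4 and SS0 at +8, ESP1 at +12 and SS1 at
 * +16, and so on; the 16-bit TSS (shift == 0) packs the same fields at half
 * those offsets. That is what the index = (dpl * 4 + 2) << shift
 * computation above encodes.
 */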
4683
4684//*****************************************************************************
4685// Needs to be at the bottom of the file (overriding macros)
4686
4687static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
4688{
4689 return *(CPU86_LDouble *)ptr;
4690}
4691
4692static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
4693{
4694 *(CPU86_LDouble *)ptr = f;
4695}
4696
4697#undef stw
4698#undef stl
4699#undef stq
4700#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
4701#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
4702#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
4703#define data64 0
4704
4705//*****************************************************************************
4706void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4707{
4708 int fpus, fptag, i, nb_xmm_regs;
4709 CPU86_LDouble tmp;
4710 uint8_t *addr;
4711
4712 if (env->cpuid_features & CPUID_FXSR)
4713 {
4714 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4715 fptag = 0;
4716 for(i = 0; i < 8; i++) {
4717 fptag |= (env->fptags[i] << i);
4718 }
4719 stw(ptr, env->fpuc);
4720 stw(ptr + 2, fpus);
4721 stw(ptr + 4, fptag ^ 0xff);
4722
4723 addr = ptr + 0x20;
4724 for(i = 0;i < 8; i++) {
4725 tmp = ST(i);
4726 helper_fstt_raw(tmp, addr);
4727 addr += 16;
4728 }
4729
4730 if (env->cr[4] & CR4_OSFXSR_MASK) {
4731 /* XXX: finish it */
4732 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4733 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4734 nb_xmm_regs = 8 << data64;
4735 addr = ptr + 0xa0;
4736 for(i = 0; i < nb_xmm_regs; i++) {
4737#if __GNUC__ < 4
4738 stq(addr, env->xmm_regs[i].XMM_Q(0));
4739 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4740#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
4741 stl(addr, env->xmm_regs[i].XMM_L(0));
4742 stl(addr + 4, env->xmm_regs[i].XMM_L(1));
4743 stl(addr + 8, env->xmm_regs[i].XMM_L(2));
4744 stl(addr + 12, env->xmm_regs[i].XMM_L(3));
4745#endif
4746 addr += 16;
4747 }
4748 }
4749 }
4750 else
4751 {
4752 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4753 int fptag;
4754
4755 fp->FCW = env->fpuc;
4756 fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4757 fptag = 0;
4758 for (i=7; i>=0; i--) {
4759 fptag <<= 2;
4760 if (env->fptags[i]) {
4761 fptag |= 3;
4762 } else {
4763 /* the FPU automatically computes it */
4764 }
4765 }
4766 fp->FTW = fptag;
4767
4768 for(i = 0;i < 8; i++) {
4769 tmp = ST(i);
4770 helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
4771 }
4772 }
4773}
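/*
 * For reference, the FXSAVE image offsets used above (per the IA-32 FXSAVE
 * layout): +0x00 FCW, +0x02 FSW, +0x04 abridged FTW (one valid bit per
 * register, hence the fptag ^ 0xff), +0x18 MXCSR, +0x1c MXCSR_MASK,
 * +0x20 ST0..ST7 in 16-byte slots, +0xa0 the XMM registers in 16-byte slots.
 */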
4774
4775//*****************************************************************************
4776#undef lduw
4777#undef ldl
4778#undef ldq
4779#define lduw(a) *(uint16_t *)(a)
4780#define ldl(a) *(uint32_t *)(a)
4781#define ldq(a) *(uint64_t *)(a)
4782//*****************************************************************************
4783void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
4784{
4785 int i, fpus, fptag, nb_xmm_regs;
4786 CPU86_LDouble tmp;
4787 uint8_t *addr;
4788
4789 if (env->cpuid_features & CPUID_FXSR)
4790 {
4791 env->fpuc = lduw(ptr);
4792 fpus = lduw(ptr + 2);
4793 fptag = lduw(ptr + 4);
4794 env->fpstt = (fpus >> 11) & 7;
4795 env->fpus = fpus & ~0x3800;
4796 fptag ^= 0xff;
4797 for(i = 0;i < 8; i++) {
4798 env->fptags[i] = ((fptag >> i) & 1);
4799 }
4800
4801 addr = ptr + 0x20;
4802 for(i = 0;i < 8; i++) {
4803 tmp = helper_fldt_raw(addr);
4804 ST(i) = tmp;
4805 addr += 16;
4806 }
4807
4808 if (env->cr[4] & CR4_OSFXSR_MASK) {
4809 /* XXX: finish it, endianness */
4810 env->mxcsr = ldl(ptr + 0x18);
4811 //ldl(ptr + 0x1c);
4812 nb_xmm_regs = 8 << data64;
4813 addr = ptr + 0xa0;
4814 for(i = 0; i < nb_xmm_regs; i++) {
4815#if HC_ARCH_BITS == 32
4816 /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
4817 env->xmm_regs[i].XMM_L(0) = ldl(addr);
4818 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
4819 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
4820 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
4821#else
4822 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4823 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4824#endif
4825 addr += 16;
4826 }
4827 }
4828 }
4829 else
4830 {
4831 PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
4832 int fptag;
4833
4834 env->fpuc = fp->FCW;
4835 env->fpstt = (fp->FSW >> 11) & 7;
4836 env->fpus = fp->FSW & ~0x3800;
4837 fptag = fp->FTW;
4838 for(i = 0;i < 8; i++) {
4839 env->fptags[i] = ((fptag & 3) == 3);
4840 fptag >>= 2;
4841 }
4843 for(i = 0;i < 8; i++) {
4844 tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
4845 ST(i) = tmp;
4846 }
4847 }
4848}
4849//*****************************************************************************
4850//*****************************************************************************
4851
4852#endif /* VBOX */
4853