VirtualBox

source: vbox/trunk/src/recompiler_new/target-i386/helper.c@ 13337

Last change on this file since 13337 was 13337, checked in by vboxsync, 16 years ago

more recompiler work

  • Property svn:eol-style set to native
File size: 136.0 KB
 
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#ifdef VBOX
30# include <VBox/err.h>
31#endif
32
33#ifndef VBOX
34#include <stdarg.h>
35#include <stdlib.h>
36#include <stdio.h>
37#include <string.h>
38#include <inttypes.h>
39#include <signal.h>
40#include <assert.h>
41#endif
42
43#include "cpu.h"
44#include "exec-all.h"
45#include "svm.h"
46#include "qemu-common.h"
47
48//#define DEBUG_PCALL
49
50#if 0
51#define raise_exception_err(a, b)\
52do {\
53 if (logfile)\
54 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
55 (raise_exception_err)(a, b);\
56} while (0)
57#endif
58
59const uint8_t parity_table[256] = {
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
81 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
82 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
83 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
85 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
86 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
87 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
88 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
89 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
91 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
92};
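/* Illustrative sketch (not part of the original file): parity_table[b] is
   CC_P exactly when byte b has an even number of set bits, matching the
   x86 PF definition. A hypothetical reference check using an XOR fold: */
#if 0 /* example only */
static int compute_pf_reference(uint8_t b)
{
    b ^= b >> 4;               /* fold parity of the high nibble into the low */
    b ^= b >> 2;
    b ^= b >> 1;               /* bit 0 now holds the XOR of all eight bits */
    return (b & 1) ? 0 : CC_P; /* even population count => PF set */
}
#endif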
93
94/* modulo 17 table */
95const uint8_t rclw_table[32] = {
96 0, 1, 2, 3, 4, 5, 6, 7,
97 8, 9,10,11,12,13,14,15,
98 16, 0, 1, 2, 3, 4, 5, 6,
99 7, 8, 9,10,11,12,13,14,
100};
101
102/* modulo 9 table */
103const uint8_t rclb_table[32] = {
104 0, 1, 2, 3, 4, 5, 6, 7,
105 8, 0, 1, 2, 3, 4, 5, 6,
106 7, 8, 0, 1, 2, 3, 4, 5,
107 6, 7, 8, 0, 1, 2, 3, 4,
108};
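/* Illustrative note (not part of the original file): RCL on an N-bit
   operand rotates through N data bits plus CF, i.e. N + 1 bits, so the
   effective rotate count is (count & 0x1f) % (N + 1). The two tables
   above simply precompute this, e.g. rclb_table[c] == (c & 0x1f) % 9: */
#if 0 /* example only */
static int rcl_effective_count(int count, int width)
{
    return (count & 0x1f) % (width + 1);
}
#endif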
109
110const CPU86_LDouble f15rk[7] =
111{
112 0.00000000000000000000L,
113 1.00000000000000000000L,
114 3.14159265358979323851L, /*pi*/
115 0.30102999566398119523L, /*lg2*/
116 0.69314718055994530943L, /*ln2*/
117 1.44269504088896340739L, /*l2e*/
118 3.32192809488736234781L, /*l2t*/
119};
120
121/* thread support */
122
123spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
124
125void cpu_lock(void)
126{
127 spin_lock(&global_cpu_lock);
128}
129
130void cpu_unlock(void)
131{
132 spin_unlock(&global_cpu_lock);
133}
134
135void cpu_loop_exit(void)
136{
137 /* NOTE: the registers at this point must be saved by hand because
138 longjmp restores them */
139 regs_to_env();
140 longjmp(env->jmp_env, 1);
141}
142
143/* return non-zero on error */
144static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
145 int selector)
146{
147 SegmentCache *dt;
148 int index;
149 target_ulong ptr;
150
151 if (selector & 0x4)
152 dt = &env->ldt;
153 else
154 dt = &env->gdt;
155 index = selector & ~7;
156 if ((index + 7) > dt->limit)
157 return -1;
158 ptr = dt->base + index;
159 *e1_ptr = ldl_kernel(ptr);
160 *e2_ptr = ldl_kernel(ptr + 4);
161 return 0;
162}
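/* Illustrative sketch (not part of the original file): a selector packs
   index[15:3] | TI[2] | RPL[1:0], which is what load_segment() decodes
   above. E.g. selector 0x002b has RPL 3, TI 0 (GDT) and descriptor byte
   offset 0x28. Hypothetical helpers making the decode explicit: */
#if 0 /* example only */
static inline int sel_rpl(int sel)    { return sel & 3; }        /* requested privilege */
static inline int sel_is_ldt(int sel) { return (sel >> 2) & 1; } /* table indicator */
static inline int sel_offset(int sel) { return sel & ~7; }       /* 8-byte descriptor offset */
#endif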
163
164static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
165{
166 unsigned int limit;
167 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
168 if (e2 & DESC_G_MASK)
169 limit = (limit << 12) | 0xfff;
170 return limit;
171}
172
173static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
174{
175 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
176}
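/* Illustrative worked example (not part of the original file): e1/e2 are
   the two 32-bit halves of an 8-byte descriptor. For a flat 4 GiB code
   descriptor e1 = 0x0000ffff, e2 = 0x00cf9a00:
     base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000) = 0
     limit = (e1 & 0xffff) | (e2 & 0x000f0000)                    = 0x000fffff
   and since G (bit 23 of e2) is set, get_seg_limit() scales the limit to
   (0x000fffff << 12) | 0xfff = 0xffffffff. */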
177
178static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
179{
180 sc->base = get_seg_base(e1, e2);
181 sc->limit = get_seg_limit(e1, e2);
182 sc->flags = e2;
183}
184
185/* init the segment cache in vm86 mode. */
186static inline void load_seg_vm(int seg, int selector)
187{
188 selector &= 0xffff;
189 cpu_x86_load_seg_cache(env, seg, selector,
190 (selector << 4), 0xffff, 0);
191}
192
193static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
194 uint32_t *esp_ptr, int dpl)
195{
196 int type, index, shift;
197
198#if 0
199 {
200 int i;
201 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
202 for(i=0;i<env->tr.limit;i++) {
203 printf("%02x ", env->tr.base[i]);
204 if ((i & 7) == 7) printf("\n");
205 }
206 printf("\n");
207 }
208#endif
209
210 if (!(env->tr.flags & DESC_P_MASK))
211 cpu_abort(env, "invalid tss");
212 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
213 if ((type & 7) != 1)
214 cpu_abort(env, "invalid tss type %d", type);
215 shift = type >> 3;
216 index = (dpl * 4 + 2) << shift;
217 if (index + (4 << shift) - 1 > env->tr.limit)
218 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
219 if (shift == 0) {
220 *esp_ptr = lduw_kernel(env->tr.base + index);
221 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
222 } else {
223 *esp_ptr = ldl_kernel(env->tr.base + index);
224 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
225 }
226}
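/* Illustrative note (standard TSS layout, stated here as an assumption
   rather than taken from this file): the (dpl * 4 + 2) << shift index
   above lands on the privileged stack pointers stored at the start of the
   TSS: in a 32-bit TSS, ESP0/SS0 at offsets 4/8, ESP1/SS1 at 12/16 and
   ESP2/SS2 at 20/24; in a 16-bit TSS the SP/SS pairs are 2 bytes each,
   starting at offset 2. */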
227
228/* XXX: merge with load_seg() */
229static void tss_load_seg(int seg_reg, int selector)
230{
231 uint32_t e1, e2;
232 int rpl, dpl, cpl;
233
234 if ((selector & 0xfffc) != 0) {
235 if (load_segment(&e1, &e2, selector) != 0)
236 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
237 if (!(e2 & DESC_S_MASK))
238 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
239 rpl = selector & 3;
240 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
241 cpl = env->hflags & HF_CPL_MASK;
242 if (seg_reg == R_CS) {
243 if (!(e2 & DESC_CS_MASK))
244 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
245 /* XXX: is it correct ? */
246 if (dpl != rpl)
247 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
248 if ((e2 & DESC_C_MASK) && dpl > rpl)
249 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
250 } else if (seg_reg == R_SS) {
251 /* SS must be writable data */
252 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
253 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
254 if (dpl != cpl || dpl != rpl)
255 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
256 } else {
257 /* not readable code */
258 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
259 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
260 /* if data or non-conforming code, check the rights */
261 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
262 if (dpl < cpl || dpl < rpl)
263 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
264 }
265 }
266 if (!(e2 & DESC_P_MASK))
267 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
268 cpu_x86_load_seg_cache(env, seg_reg, selector,
269 get_seg_base(e1, e2),
270 get_seg_limit(e1, e2),
271 e2);
272 } else {
273 if (seg_reg == R_SS || seg_reg == R_CS)
274 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
275 }
276}
277
278#define SWITCH_TSS_JMP 0
279#define SWITCH_TSS_IRET 1
280#define SWITCH_TSS_CALL 2
281
282/* XXX: restore CPU state in registers (PowerPC case) */
283static void switch_tss(int tss_selector,
284 uint32_t e1, uint32_t e2, int source,
285 uint32_t next_eip)
286{
287 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
288 target_ulong tss_base;
289 uint32_t new_regs[8], new_segs[6];
290 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
291 uint32_t old_eflags, eflags_mask;
292 SegmentCache *dt;
293 int index;
294 target_ulong ptr;
295
296 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
297#ifdef DEBUG_PCALL
298 if (loglevel & CPU_LOG_PCALL)
299 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
300#endif
301
302#if defined(VBOX) && defined(DEBUG)
303 printf("switch_tss %x %x %x %d %08x\n", tss_selector, e1, e2, source, next_eip);
304#endif
305
306 /* if task gate, we read the TSS segment and load it */
307 if (type == 5) {
308 if (!(e2 & DESC_P_MASK))
309 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
310 tss_selector = e1 >> 16;
311 if (tss_selector & 4)
312 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
313 if (load_segment(&e1, &e2, tss_selector) != 0)
314 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
315 if (e2 & DESC_S_MASK)
316 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
317 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
318 if ((type & 7) != 1)
319 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
320 }
321
322 if (!(e2 & DESC_P_MASK))
323 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
324
325 if (type & 8)
326 tss_limit_max = 103;
327 else
328 tss_limit_max = 43;
329 tss_limit = get_seg_limit(e1, e2);
330 tss_base = get_seg_base(e1, e2);
331 if ((tss_selector & 4) != 0 ||
332 tss_limit < tss_limit_max)
333 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
334 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
335 if (old_type & 8)
336 old_tss_limit_max = 103;
337 else
338 old_tss_limit_max = 43;
339
340 /* read all the registers from the new TSS */
341 if (type & 8) {
342 /* 32 bit */
343 new_cr3 = ldl_kernel(tss_base + 0x1c);
344 new_eip = ldl_kernel(tss_base + 0x20);
345 new_eflags = ldl_kernel(tss_base + 0x24);
346 for(i = 0; i < 8; i++)
347 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
348 for(i = 0; i < 6; i++)
349 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
350 new_ldt = lduw_kernel(tss_base + 0x60);
351 new_trap = ldl_kernel(tss_base + 0x64);
352 } else {
353 /* 16 bit */
354 new_cr3 = 0;
355 new_eip = lduw_kernel(tss_base + 0x0e);
356 new_eflags = lduw_kernel(tss_base + 0x10);
357 for(i = 0; i < 8; i++)
358 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
359 for(i = 0; i < 4; i++)
360 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
361 new_ldt = lduw_kernel(tss_base + 0x2a);
362 new_segs[R_FS] = 0;
363 new_segs[R_GS] = 0;
364 new_trap = 0;
365 }
366
367 /* NOTE: we must avoid memory exceptions during the task switch,
368 so we make dummy accesses before */
369 /* XXX: it can still fail in some cases, so a bigger hack is
370 necessary to validate the TLB after having done the accesses */
371
372 v1 = ldub_kernel(env->tr.base);
373 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
374 stb_kernel(env->tr.base, v1);
375 stb_kernel(env->tr.base + old_tss_limit_max, v2);
376
377 /* clear busy bit (it is restartable) */
378 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
379 target_ulong ptr;
380 uint32_t e2;
381 ptr = env->gdt.base + (env->tr.selector & ~7);
382 e2 = ldl_kernel(ptr + 4);
383 e2 &= ~DESC_TSS_BUSY_MASK;
384 stl_kernel(ptr + 4, e2);
385 }
386 old_eflags = compute_eflags();
387 if (source == SWITCH_TSS_IRET)
388 old_eflags &= ~NT_MASK;
389
390 /* save the current state in the old TSS */
391 if (type & 8) {
392 /* 32 bit */
393 stl_kernel(env->tr.base + 0x20, next_eip);
394 stl_kernel(env->tr.base + 0x24, old_eflags);
395 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
396 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
397 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
398 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
399 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
400 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
401 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
402 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
403 for(i = 0; i < 6; i++)
404 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
405#if defined(VBOX) && defined(DEBUG)
406 printf("TSS 32 bits switch\n");
407 printf("Saving CS=%08X\n", env->segs[R_CS].selector);
408#endif
409 } else {
410 /* 16 bit */
411 stw_kernel(env->tr.base + 0x0e, next_eip);
412 stw_kernel(env->tr.base + 0x10, old_eflags);
413 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
414 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
415 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
416 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
417 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
418 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
419 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
420 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
421 for(i = 0; i < 4; i++)
422 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
423 }
424
425 /* now if an exception occurs, it will occur in the next task
426 context */
427
428 if (source == SWITCH_TSS_CALL) {
429 stw_kernel(tss_base, env->tr.selector);
430 new_eflags |= NT_MASK;
431 }
432
433 /* set busy bit */
434 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
435 target_ulong ptr;
436 uint32_t e2;
437 ptr = env->gdt.base + (tss_selector & ~7);
438 e2 = ldl_kernel(ptr + 4);
439 e2 |= DESC_TSS_BUSY_MASK;
440 stl_kernel(ptr + 4, e2);
441 }
442
443 /* set the new CPU state */
444 /* from this point, any exception which occurs can give problems */
445 env->cr[0] |= CR0_TS_MASK;
446 env->hflags |= HF_TS_MASK;
447 env->tr.selector = tss_selector;
448 env->tr.base = tss_base;
449 env->tr.limit = tss_limit;
450 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
451
452 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
453 cpu_x86_update_cr3(env, new_cr3);
454 }
455
456 /* load all registers without an exception, then reload them with
457 possible exception */
458 env->eip = new_eip;
459 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
460 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
461 if (!(type & 8))
462 eflags_mask &= 0xffff;
463 load_eflags(new_eflags, eflags_mask);
464 /* XXX: what to do in 16 bit case ? */
465 EAX = new_regs[0];
466 ECX = new_regs[1];
467 EDX = new_regs[2];
468 EBX = new_regs[3];
469 ESP = new_regs[4];
470 EBP = new_regs[5];
471 ESI = new_regs[6];
472 EDI = new_regs[7];
473 if (new_eflags & VM_MASK) {
474 for(i = 0; i < 6; i++)
475 load_seg_vm(i, new_segs[i]);
476 /* in vm86, CPL is always 3 */
477 cpu_x86_set_cpl(env, 3);
478 } else {
479 /* CPL is set to the RPL of CS */
480 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
481 /* load just the selectors first, as the rest may trigger exceptions */
482 for(i = 0; i < 6; i++)
483 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
484 }
485
486 env->ldt.selector = new_ldt & ~4;
487 env->ldt.base = 0;
488 env->ldt.limit = 0;
489 env->ldt.flags = 0;
490
491 /* load the LDT */
492 if (new_ldt & 4)
493 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
494
495 if ((new_ldt & 0xfffc) != 0) {
496 dt = &env->gdt;
497 index = new_ldt & ~7;
498 if ((index + 7) > dt->limit)
499 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
500 ptr = dt->base + index;
501 e1 = ldl_kernel(ptr);
502 e2 = ldl_kernel(ptr + 4);
503 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
504 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
505 if (!(e2 & DESC_P_MASK))
506 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
507 load_seg_cache_raw_dt(&env->ldt, e1, e2);
508 }
509
510 /* load the segments */
511 if (!(new_eflags & VM_MASK)) {
512 tss_load_seg(R_CS, new_segs[R_CS]);
513 tss_load_seg(R_SS, new_segs[R_SS]);
514 tss_load_seg(R_ES, new_segs[R_ES]);
515 tss_load_seg(R_DS, new_segs[R_DS]);
516 tss_load_seg(R_FS, new_segs[R_FS]);
517 tss_load_seg(R_GS, new_segs[R_GS]);
518 }
519
520 /* check that EIP is in the CS segment limits */
521 if (new_eip > env->segs[R_CS].limit) {
522 /* XXX: different exception if CALL ? */
523 raise_exception_err(EXCP0D_GPF, 0);
524 }
525}
526
527/* check if Port I/O is allowed in TSS */
528static inline void check_io(int addr, int size)
529{
530 int io_offset, val, mask;
531
532 /* TSS must be a valid 32 bit one */
533 if (!(env->tr.flags & DESC_P_MASK) ||
534 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
535 env->tr.limit < 103)
536 goto fail;
537 io_offset = lduw_kernel(env->tr.base + 0x66);
538 io_offset += (addr >> 3);
539 /* Note: the check needs two bytes */
540 if ((io_offset + 1) > env->tr.limit)
541 goto fail;
542 val = lduw_kernel(env->tr.base + io_offset);
543 val >>= (addr & 7);
544 mask = (1 << size) - 1;
545 /* all bits must be zero to allow the I/O */
546 if ((val & mask) != 0) {
547 fail:
548 raise_exception_err(EXCP0D_GPF, 0);
549 }
550}
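/* Illustrative worked example (hypothetical values, not part of the
   original file): for a 2-byte access at port 0x3fd, check_io() reads the
   16-bit word at io_offset + (0x3fd >> 3) = io_offset + 0x7f, shifts it
   right by 0x3fd & 7 = 5 and requires the low (1 << 2) - 1 = 0x3 bits to
   be clear, i.e. the permission bits of ports 0x3fd and 0x3fe. */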
551
552void check_iob_T0(void)
553{
554 check_io(T0, 1);
555}
556
557void check_iow_T0(void)
558{
559 check_io(T0, 2);
560}
561
562void check_iol_T0(void)
563{
564 check_io(T0, 4);
565}
566
567void check_iob_DX(void)
568{
569 check_io(EDX & 0xffff, 1);
570}
571
572void check_iow_DX(void)
573{
574 check_io(EDX & 0xffff, 2);
575}
576
577void check_iol_DX(void)
578{
579 check_io(EDX & 0xffff, 4);
580}
581
582static inline unsigned int get_sp_mask(unsigned int e2)
583{
584 if (e2 & DESC_B_MASK)
585 return 0xffffffff;
586 else
587 return 0xffff;
588}
589
590#ifdef TARGET_X86_64
591#define SET_ESP(val, sp_mask)\
592do {\
593 if ((sp_mask) == 0xffff)\
594 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
595 else if ((sp_mask) == 0xffffffffLL)\
596 ESP = (uint32_t)(val);\
597 else\
598 ESP = (val);\
599} while (0)
600#else
601#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
602#endif
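/* Illustrative note (not part of the original file): SET_ESP only updates
   the bits selected by sp_mask, so a 16-bit stack segment (sp_mask ==
   0xffff) leaves the upper half of ESP untouched, e.g. with
   ESP = 0x12345678, SET_ESP(0xdeadbeef, 0xffff) yields ESP == 0x1234beef. */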
603
604/* XXX: add an is_user flag to have proper security support */
605#define PUSHW(ssp, sp, sp_mask, val)\
606{\
607 sp -= 2;\
608 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
609}
610
611#define PUSHL(ssp, sp, sp_mask, val)\
612{\
613 sp -= 4;\
614 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
615}
616
617#define POPW(ssp, sp, sp_mask, val)\
618{\
619 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
620 sp += 2;\
621}
622
623#define POPL(ssp, sp, sp_mask, val)\
624{\
625 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
626 sp += 4;\
627}
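/* Illustrative usage sketch (hypothetical values, not part of the original
   file): building a far-call frame on a 32-bit stack might look like
       uint32_t sp = ESP;
       PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
       PUSHL(ssp, sp, sp_mask, next_eip);
       SET_ESP(sp, sp_mask);
   The macros deliberately work on a local copy of the stack pointer, so a
   fault inside st*_kernel() leaves ESP unmodified and the faulting
   instruction restartable; the caller commits with SET_ESP() at the end. */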
628
629/* protected mode interrupt */
630static void do_interrupt_protected(int intno, int is_int, int error_code,
631 unsigned int next_eip, int is_hw)
632{
633 SegmentCache *dt;
634 target_ulong ptr, ssp;
635 int type, dpl, selector, ss_dpl, cpl;
636 int has_error_code, new_stack, shift;
637 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
638 uint32_t old_eip, sp_mask;
639
640#ifdef VBOX
641 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
642 cpu_loop_exit();
643#endif
644
645 has_error_code = 0;
646 if (!is_int && !is_hw) {
647 switch(intno) {
648 case 8:
649 case 10:
650 case 11:
651 case 12:
652 case 13:
653 case 14:
654 case 17:
655 has_error_code = 1;
656 break;
657 }
658 }
659 if (is_int)
660 old_eip = next_eip;
661 else
662 old_eip = env->eip;
663
664 dt = &env->idt;
665 if (intno * 8 + 7 > dt->limit)
666 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
667 ptr = dt->base + intno * 8;
668 e1 = ldl_kernel(ptr);
669 e2 = ldl_kernel(ptr + 4);
670 /* check gate type */
671 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
672 switch(type) {
673 case 5: /* task gate */
674 /* must do that check here to return the correct error code */
675 if (!(e2 & DESC_P_MASK))
676 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
677 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
678 if (has_error_code) {
679 int type;
680 uint32_t mask;
681 /* push the error code */
682 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
683 shift = type >> 3;
684 if (env->segs[R_SS].flags & DESC_B_MASK)
685 mask = 0xffffffff;
686 else
687 mask = 0xffff;
688 esp = (ESP - (2 << shift)) & mask;
689 ssp = env->segs[R_SS].base + esp;
690 if (shift)
691 stl_kernel(ssp, error_code);
692 else
693 stw_kernel(ssp, error_code);
694 SET_ESP(esp, mask);
695 }
696 return;
697 case 6: /* 286 interrupt gate */
698 case 7: /* 286 trap gate */
699 case 14: /* 386 interrupt gate */
700 case 15: /* 386 trap gate */
701 break;
702 default:
703 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
704 break;
705 }
706 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
707 cpl = env->hflags & HF_CPL_MASK;
708 /* check privilege if software int */
709 if (is_int && dpl < cpl)
710 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
711 /* check valid bit */
712 if (!(e2 & DESC_P_MASK))
713 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
714 selector = e1 >> 16;
715 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
716 if ((selector & 0xfffc) == 0)
717 raise_exception_err(EXCP0D_GPF, 0);
718
719 if (load_segment(&e1, &e2, selector) != 0)
720 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
721 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
722 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
723 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
724 if (dpl > cpl)
725 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
726 if (!(e2 & DESC_P_MASK))
727 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
728 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
729 /* to inner privilege */
730 get_ss_esp_from_tss(&ss, &esp, dpl);
731 if ((ss & 0xfffc) == 0)
732 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
733 if ((ss & 3) != dpl)
734 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
735 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
736 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
737 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
738 if (ss_dpl != dpl)
739 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
740 if (!(ss_e2 & DESC_S_MASK) ||
741 (ss_e2 & DESC_CS_MASK) ||
742 !(ss_e2 & DESC_W_MASK))
743 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
744 if (!(ss_e2 & DESC_P_MASK))
745#ifdef VBOX /* See page 3-477 of 253666.pdf */
746 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
747#else
748 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
749#endif
750 new_stack = 1;
751 sp_mask = get_sp_mask(ss_e2);
752 ssp = get_seg_base(ss_e1, ss_e2);
753#if defined(VBOX) && defined(DEBUG)
754 printf("new stack %04X:%08X gate dpl=%d\n", ss, esp, dpl);
755#endif
756 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
757 /* to same privilege */
758 if (env->eflags & VM_MASK)
759 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
760 new_stack = 0;
761 sp_mask = get_sp_mask(env->segs[R_SS].flags);
762 ssp = env->segs[R_SS].base;
763 esp = ESP;
764 dpl = cpl;
765 } else {
766 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
767 new_stack = 0; /* avoid warning */
768 sp_mask = 0; /* avoid warning */
769 ssp = 0; /* avoid warning */
770 esp = 0; /* avoid warning */
771 }
772
773 shift = type >> 3;
774
775#if 0
776 /* XXX: check that enough room is available */
777 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
778 if (env->eflags & VM_MASK)
779 push_size += 8;
780 push_size <<= shift;
781#endif
782 if (shift == 1) {
783 if (new_stack) {
784 if (env->eflags & VM_MASK) {
785 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
786 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
787 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
788 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
789 }
790 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
791 PUSHL(ssp, esp, sp_mask, ESP);
792 }
793 PUSHL(ssp, esp, sp_mask, compute_eflags());
794 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
795 PUSHL(ssp, esp, sp_mask, old_eip);
796 if (has_error_code) {
797 PUSHL(ssp, esp, sp_mask, error_code);
798 }
799 } else {
800 if (new_stack) {
801 if (env->eflags & VM_MASK) {
802 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
803 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
804 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
805 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
806 }
807 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
808 PUSHW(ssp, esp, sp_mask, ESP);
809 }
810 PUSHW(ssp, esp, sp_mask, compute_eflags());
811 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
812 PUSHW(ssp, esp, sp_mask, old_eip);
813 if (has_error_code) {
814 PUSHW(ssp, esp, sp_mask, error_code);
815 }
816 }
817
818 if (new_stack) {
819 if (env->eflags & VM_MASK) {
820 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
821 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
822 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
823 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
824 }
825 ss = (ss & ~3) | dpl;
826 cpu_x86_load_seg_cache(env, R_SS, ss,
827 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
828 }
829 SET_ESP(esp, sp_mask);
830
831 selector = (selector & ~3) | dpl;
832 cpu_x86_load_seg_cache(env, R_CS, selector,
833 get_seg_base(e1, e2),
834 get_seg_limit(e1, e2),
835 e2);
836 cpu_x86_set_cpl(env, dpl);
837 env->eip = offset;
838
839 /* interrupt gates clear the IF mask */
840 if ((type & 1) == 0) {
841 env->eflags &= ~IF_MASK;
842 }
843 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
844}
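/* Illustrative note (not part of the original file): for a ring 3 to
   ring 0 interrupt through a 32-bit gate, the new_stack path above builds
   the kernel stack frame, from higher to lower addresses: SS, ESP,
   EFLAGS, CS, EIP and, for some exceptions, an error code; same-privilege
   delivery omits the SS/ESP pair. */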
845
846#ifdef VBOX
847
848/* check if VME interrupt redirection is enabled in TSS */
849static inline bool is_vme_irq_redirected(int intno)
850{
851 int io_offset, intredir_offset;
852 unsigned char val, mask;
853
854 /* TSS must be a valid 32 bit one */
855 if (!(env->tr.flags & DESC_P_MASK) ||
856 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
857 env->tr.limit < 103)
858 goto fail;
859 io_offset = lduw_kernel(env->tr.base + 0x66);
860 /* the virtual interrupt redirection bitmap is located below the io bitmap */
861 intredir_offset = io_offset - 0x20;
862
863 intredir_offset += (intno >> 3);
864 if ((intredir_offset) > env->tr.limit)
865 goto fail;
866
867 val = ldub_kernel(env->tr.base + intredir_offset);
868 mask = 1 << (unsigned char)(intno & 7);
869
870 /* bit set means no redirection. */
871 if ((val & mask) != 0) {
872 return false;
873 }
874 return true;
875
876fail:
877 raise_exception_err(EXCP0D_GPF, 0);
878 return true;
879}
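/* Illustrative worked example (hypothetical io_offset, not part of the
   original file): with io_offset = 0x88 the redirection bitmap starts at
   0x88 - 0x20 = 0x68. For int 0x21 the code reads the byte at
   0x68 + (0x21 >> 3) = 0x6c and tests bit 0x21 & 7 = 1: a clear bit means
   the interrupt is redirected to the virtual-8086 IVT, a set bit forces
   the protected-mode path handled by the caller. */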
880
881/* V86 mode software interrupt with CR4.VME=1 */
882static void do_soft_interrupt_vme(int intno, int error_code, unsigned int next_eip)
883{
884 target_ulong ptr, ssp;
885 int selector;
886 uint32_t offset, esp;
887 uint32_t old_cs, old_eflags;
888 uint32_t iopl;
889
890 iopl = ((env->eflags >> IOPL_SHIFT) & 3);
891
892 if (!is_vme_irq_redirected(intno))
893 {
894 if (iopl == 3)
895 /* normal protected mode handler call */
896 return do_interrupt_protected(intno, 1, error_code, next_eip, 0);
897 else
898 raise_exception_err(EXCP0D_GPF, 0);
899 }
900
901 /* virtual mode idt is at linear address 0 */
902 ptr = 0 + intno * 4;
903 offset = lduw_kernel(ptr);
904 selector = lduw_kernel(ptr + 2);
905 esp = ESP;
906 ssp = env->segs[R_SS].base;
907 old_cs = env->segs[R_CS].selector;
908
909 old_eflags = compute_eflags();
910 if (iopl < 3)
911 {
912 /* copy VIF into IF and set IOPL to 3 */
913 if (env->eflags & VIF_MASK)
914 old_eflags |= IF_MASK;
915 else
916 old_eflags &= ~IF_MASK;
917
918 old_eflags |= (3 << IOPL_SHIFT);
919 }
920
921 /* XXX: use SS segment size ? */
922 PUSHW(ssp, esp, 0xffff, old_eflags);
923 PUSHW(ssp, esp, 0xffff, old_cs);
924 PUSHW(ssp, esp, 0xffff, next_eip);
925
926 /* update processor state */
927 ESP = (ESP & ~0xffff) | (esp & 0xffff);
928 env->eip = offset;
929 env->segs[R_CS].selector = selector;
930 env->segs[R_CS].base = (selector << 4);
931 env->eflags &= ~(TF_MASK | RF_MASK);
932
933 if (iopl < 3)
934 env->eflags &= ~VIF_MASK;
935 else
936 env->eflags &= ~IF_MASK;
937}
938#endif /* VBOX */
939
940#ifdef TARGET_X86_64
941
942#define PUSHQ(sp, val)\
943{\
944 sp -= 8;\
945 stq_kernel(sp, (val));\
946}
947
948#define POPQ(sp, val)\
949{\
950 val = ldq_kernel(sp);\
951 sp += 8;\
952}
953
954static inline target_ulong get_rsp_from_tss(int level)
955{
956 int index;
957
958#if 0
959 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
960 env->tr.base, env->tr.limit);
961#endif
962
963 if (!(env->tr.flags & DESC_P_MASK))
964 cpu_abort(env, "invalid tss");
965 index = 8 * level + 4;
966 if ((index + 7) > env->tr.limit)
967 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
968 return ldq_kernel(env->tr.base + index);
969}
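/* Illustrative note (standard 64-bit TSS layout, stated as an assumption
   rather than taken from this file): the 8 * level + 4 index above covers
   both tables in the TSS: levels 0-2 select RSP0/RSP1/RSP2 at offsets
   4/12/20, and the ist + 3 values passed by do_interrupt64() select
   IST1..IST7 at offsets 36..84. */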
970
971/* 64 bit interrupt */
972static void do_interrupt64(int intno, int is_int, int error_code,
973 target_ulong next_eip, int is_hw)
974{
975 SegmentCache *dt;
976 target_ulong ptr;
977 int type, dpl, selector, cpl, ist;
978 int has_error_code, new_stack;
979 uint32_t e1, e2, e3, ss;
980 target_ulong old_eip, esp, offset;
981
982#ifdef VBOX
983 if (remR3NotifyTrap(env, intno, error_code, next_eip) != VINF_SUCCESS)
984 cpu_loop_exit();
985#endif
986
987 has_error_code = 0;
988 if (!is_int && !is_hw) {
989 switch(intno) {
990 case 8:
991 case 10:
992 case 11:
993 case 12:
994 case 13:
995 case 14:
996 case 17:
997 has_error_code = 1;
998 break;
999 }
1000 }
1001 if (is_int)
1002 old_eip = next_eip;
1003 else
1004 old_eip = env->eip;
1005
1006 dt = &env->idt;
1007 if (intno * 16 + 15 > dt->limit)
1008 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1009 ptr = dt->base + intno * 16;
1010 e1 = ldl_kernel(ptr);
1011 e2 = ldl_kernel(ptr + 4);
1012 e3 = ldl_kernel(ptr + 8);
1013 /* check gate type */
1014 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1015 switch(type) {
1016 case 14: /* 386 interrupt gate */
1017 case 15: /* 386 trap gate */
1018 break;
1019 default:
1020 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1021 break;
1022 }
1023 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1024 cpl = env->hflags & HF_CPL_MASK;
1025 /* check privilege if software int */
1026 if (is_int && dpl < cpl)
1027 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1028 /* check valid bit */
1029 if (!(e2 & DESC_P_MASK))
1030 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1031 selector = e1 >> 16;
1032 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1033 ist = e2 & 7;
1034 if ((selector & 0xfffc) == 0)
1035 raise_exception_err(EXCP0D_GPF, 0);
1036
1037 if (load_segment(&e1, &e2, selector) != 0)
1038 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1039 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1040 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1041 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1042 if (dpl > cpl)
1043 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1044 if (!(e2 & DESC_P_MASK))
1045 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1046 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1047 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1048 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1049 /* to inner privilege */
1050 if (ist != 0)
1051 esp = get_rsp_from_tss(ist + 3);
1052 else
1053 esp = get_rsp_from_tss(dpl);
1054 esp &= ~0xfLL; /* align stack */
1055 ss = 0;
1056 new_stack = 1;
1057 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1058 /* to same privilege */
1059 if (env->eflags & VM_MASK)
1060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1061 new_stack = 0;
1062 if (ist != 0)
1063 esp = get_rsp_from_tss(ist + 3);
1064 else
1065 esp = ESP;
1066 esp &= ~0xfLL; /* align stack */
1067 dpl = cpl;
1068 } else {
1069 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1070 new_stack = 0; /* avoid warning */
1071 esp = 0; /* avoid warning */
1072 }
1073
1074 PUSHQ(esp, env->segs[R_SS].selector);
1075 PUSHQ(esp, ESP);
1076 PUSHQ(esp, compute_eflags());
1077 PUSHQ(esp, env->segs[R_CS].selector);
1078 PUSHQ(esp, old_eip);
1079 if (has_error_code) {
1080 PUSHQ(esp, error_code);
1081 }
1082
1083 if (new_stack) {
1084 ss = 0 | dpl;
1085 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1086 }
1087 ESP = esp;
1088
1089 selector = (selector & ~3) | dpl;
1090 cpu_x86_load_seg_cache(env, R_CS, selector,
1091 get_seg_base(e1, e2),
1092 get_seg_limit(e1, e2),
1093 e2);
1094 cpu_x86_set_cpl(env, dpl);
1095 env->eip = offset;
1096
1097 /* interrupt gates clear the IF mask */
1098 if ((type & 1) == 0) {
1099 env->eflags &= ~IF_MASK;
1100 }
1101 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1102}
1103#endif
1104
1105void helper_syscall(int next_eip_addend)
1106{
1107 int selector;
1108
1109 if (!(env->efer & MSR_EFER_SCE)) {
1110 raise_exception_err(EXCP06_ILLOP, 0);
1111 }
1112 selector = (env->star >> 32) & 0xffff;
1113#ifdef TARGET_X86_64
1114 if (env->hflags & HF_LMA_MASK) {
1115 int code64;
1116
1117 ECX = env->eip + next_eip_addend;
1118 env->regs[11] = compute_eflags();
1119
1120 code64 = env->hflags & HF_CS64_MASK;
1121
1122 cpu_x86_set_cpl(env, 0);
1123 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1124 0, 0xffffffff,
1125 DESC_G_MASK | DESC_P_MASK |
1126 DESC_S_MASK |
1127 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1128 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1129 0, 0xffffffff,
1130 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1131 DESC_S_MASK |
1132 DESC_W_MASK | DESC_A_MASK);
1133 env->eflags &= ~env->fmask;
1134 load_eflags(env->eflags, 0);
1135 if (code64)
1136 env->eip = env->lstar;
1137 else
1138 env->eip = env->cstar;
1139 } else
1140#endif
1141 {
1142 ECX = (uint32_t)(env->eip + next_eip_addend);
1143
1144 cpu_x86_set_cpl(env, 0);
1145 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1146 0, 0xffffffff,
1147 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1148 DESC_S_MASK |
1149 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1150 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1151 0, 0xffffffff,
1152 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1153 DESC_S_MASK |
1154 DESC_W_MASK | DESC_A_MASK);
1155 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1156 env->eip = (uint32_t)env->star;
1157 }
1158}
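/* Illustrative note (architectural MSR layout, stated as an assumption
   rather than taken from this file): the selector arithmetic above and in
   helper_sysret() follows the STAR convention:
     SYSCALL: CS = STAR[47:32],  SS = STAR[47:32] + 8
     SYSRET:  CS = STAR[63:48] (+ 16 for a 64-bit return), SS = STAR[63:48] + 8
   In long mode the target RIP comes from LSTAR (64-bit callers) or CSTAR
   (compatibility-mode callers), and RFLAGS is masked by SFMASK
   (env->fmask above). */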
1159
1160void helper_sysret(int dflag)
1161{
1162 int cpl, selector;
1163
1164 if (!(env->efer & MSR_EFER_SCE)) {
1165 raise_exception_err(EXCP06_ILLOP, 0);
1166 }
1167 cpl = env->hflags & HF_CPL_MASK;
1168 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1169 raise_exception_err(EXCP0D_GPF, 0);
1170 }
1171 selector = (env->star >> 48) & 0xffff;
1172#ifdef TARGET_X86_64
1173 if (env->hflags & HF_LMA_MASK) {
1174 if (dflag == 2) {
1175 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1176 0, 0xffffffff,
1177 DESC_G_MASK | DESC_P_MASK |
1178 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1179 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1180 DESC_L_MASK);
1181 env->eip = ECX;
1182 } else {
1183 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1184 0, 0xffffffff,
1185 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1186 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1187 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1188 env->eip = (uint32_t)ECX;
1189 }
1190 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1191 0, 0xffffffff,
1192 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1193 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1194 DESC_W_MASK | DESC_A_MASK);
1195 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1196 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1197 cpu_x86_set_cpl(env, 3);
1198 } else
1199#endif
1200 {
1201 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1202 0, 0xffffffff,
1203 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1204 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1205 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1206 env->eip = (uint32_t)ECX;
1207 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1208 0, 0xffffffff,
1209 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1210 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1211 DESC_W_MASK | DESC_A_MASK);
1212 env->eflags |= IF_MASK;
1213 cpu_x86_set_cpl(env, 3);
1214 }
1215#ifdef USE_KQEMU
1216 if (kqemu_is_ok(env)) {
1217 if (env->hflags & HF_LMA_MASK)
1218 CC_OP = CC_OP_EFLAGS;
1219 env->exception_index = -1;
1220 cpu_loop_exit();
1221 }
1222#endif
1223}
1224
1225#ifdef VBOX
1226/**
1227 * Checks and processes external VMM events.
1228 * Called by op_check_external_event() when any of the flags is set and can be serviced.
1229 */
1230void helper_external_event(void)
1231{
1232#if defined(RT_OS_DARWIN) && defined(VBOX_STRICT)
1233 uintptr_t uESP;
1234 __asm__ __volatile__("movl %%esp, %0" : "=r" (uESP));
1235 AssertMsg(!(uESP & 15), ("esp=%#p\n", uESP));
1236#endif
1237 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD)
1238 {
1239 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_HARD);
1240 cpu_interrupt(env, CPU_INTERRUPT_HARD);
1241 }
1242 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_EXIT)
1243 {
1244 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_EXIT);
1245 cpu_interrupt(env, CPU_INTERRUPT_EXIT);
1246 }
1247 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_DMA)
1248 {
1249 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_DMA);
1250 remR3DmaRun(env);
1251 }
1252 if (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
1253 {
1254 ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
1255 remR3TimersRun(env);
1256 }
1257}
1258/* helper for recording call instruction addresses for later scanning */
1259void helper_record_call()
1260{
1261 if ( !(env->state & CPU_RAW_RING0)
1262 && (env->cr[0] & CR0_PG_MASK)
1263 && !(env->eflags & X86_EFL_IF))
1264 remR3RecordCall(env);
1265}
1266#endif /* VBOX */
1267
1268/* real mode interrupt */
1269static void do_interrupt_real(int intno, int is_int, int error_code,
1270 unsigned int next_eip)
1271{
1272 SegmentCache *dt;
1273 target_ulong ptr, ssp;
1274 int selector;
1275 uint32_t offset, esp;
1276 uint32_t old_cs, old_eip;
1277
1278 /* real mode (simpler!) */
1279 dt = &env->idt;
1280 if (intno * 4 + 3 > dt->limit)
1281 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1282 ptr = dt->base + intno * 4;
1283 offset = lduw_kernel(ptr);
1284 selector = lduw_kernel(ptr + 2);
1285 esp = ESP;
1286 ssp = env->segs[R_SS].base;
1287 if (is_int)
1288 old_eip = next_eip;
1289 else
1290 old_eip = env->eip;
1291 old_cs = env->segs[R_CS].selector;
1292 /* XXX: use SS segment size ? */
1293 PUSHW(ssp, esp, 0xffff, compute_eflags());
1294 PUSHW(ssp, esp, 0xffff, old_cs);
1295 PUSHW(ssp, esp, 0xffff, old_eip);
1296
1297 /* update processor state */
1298 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1299 env->eip = offset;
1300 env->segs[R_CS].selector = selector;
1301 env->segs[R_CS].base = (selector << 4);
1302 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1303}
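/* Illustrative worked example (not part of the original file): in real
   mode, vector n lives at linear address n * 4 as an offset:segment pair,
   so for int 0x10 the code above fetches the new IP from 0x40 and the new
   CS from 0x42, then pushes FLAGS, the old CS and the old IP on the
   16-bit stack. */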
1304
1305/* fake user mode interrupt */
1306void do_interrupt_user(int intno, int is_int, int error_code,
1307 target_ulong next_eip)
1308{
1309 SegmentCache *dt;
1310 target_ulong ptr;
1311 int dpl, cpl;
1312 uint32_t e2;
1313
1314 dt = &env->idt;
1315 ptr = dt->base + (intno * 8);
1316 e2 = ldl_kernel(ptr + 4);
1317
1318 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1319 cpl = env->hflags & HF_CPL_MASK;
1320 /* check privilege if software int */
1321 if (is_int && dpl < cpl)
1322 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1323
1324 /* Since we emulate only user space, we cannot do more than
1325 exiting the emulation with the suitable exception and error
1326 code */
1327 if (is_int)
1328 EIP = next_eip;
1329}
1330
1331/*
1332 * Begin execution of an interrupt. is_int is TRUE if coming from
1333 * the int instruction. next_eip is the EIP value AFTER the interrupt
1334 * instruction. It is only relevant if is_int is TRUE.
1335 */
1336void do_interrupt(int intno, int is_int, int error_code,
1337 target_ulong next_eip, int is_hw)
1338{
1339 if (loglevel & CPU_LOG_INT) {
1340 if ((env->cr[0] & CR0_PE_MASK)) {
1341 static int count;
1342 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1343 count, intno, error_code, is_int,
1344 env->hflags & HF_CPL_MASK,
1345 env->segs[R_CS].selector, EIP,
1346 (int)env->segs[R_CS].base + EIP,
1347 env->segs[R_SS].selector, ESP);
1348 if (intno == 0x0e) {
1349 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1350 } else {
1351 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1352 }
1353 fprintf(logfile, "\n");
1354 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1355#if 0
1356 {
1357 int i;
1358 uint8_t *ptr;
1359 fprintf(logfile, " code=");
1360 ptr = env->segs[R_CS].base + env->eip;
1361 for(i = 0; i < 16; i++) {
1362 fprintf(logfile, " %02x", ldub(ptr + i));
1363 }
1364 fprintf(logfile, "\n");
1365 }
1366#endif
1367 count++;
1368 }
1369 }
1370 if (env->cr[0] & CR0_PE_MASK) {
1371#ifdef TARGET_X86_64
1372 if (env->hflags & HF_LMA_MASK) {
1373 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1374 } else
1375#endif
1376 {
1377#ifdef VBOX
1378 /* int xx *, v86 code and VME enabled? */
1379 if ( (env->eflags & VM_MASK)
1380 && (env->cr[4] & CR4_VME_MASK)
1381 && is_int
1382 && !is_hw
1383 && env->eip + 1 != next_eip /* single byte int 3 goes straight to the protected mode handler */
1384 )
1385 do_soft_interrupt_vme(intno, error_code, next_eip);
1386 else
1387#endif /* VBOX */
1388 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1389 }
1390 } else {
1391 do_interrupt_real(intno, is_int, error_code, next_eip);
1392 }
1393}
1394
1395/*
1396 * Signal an interrupt. It is executed in the main CPU loop.
1397 * is_int is TRUE if coming from the int instruction. next_eip is the
1398 * EIP value AFTER the interrupt instruction. It is only relevant if
1399 * is_int is TRUE.
1400 */
1401void raise_interrupt(int intno, int is_int, int error_code,
1402 int next_eip_addend)
1403{
1404#if defined(VBOX) && defined(DEBUG)
1405 NOT_DMIK(Log2(("raise_interrupt: %x %x %x %VGv\n", intno, is_int, error_code, env->eip + next_eip_addend)));
1406#endif
1407 env->exception_index = intno;
1408 env->error_code = error_code;
1409 env->exception_is_int = is_int;
1410 env->exception_next_eip = env->eip + next_eip_addend;
1411 cpu_loop_exit();
1412}
1413
1414/* same as raise_exception_err, but do not restore global registers */
1415static void raise_exception_err_norestore(int exception_index, int error_code)
1416{
1417 env->exception_index = exception_index;
1418 env->error_code = error_code;
1419 env->exception_is_int = 0;
1420 env->exception_next_eip = 0;
1421 longjmp(env->jmp_env, 1);
1422}
1423
1424/* shortcuts to generate exceptions */
1425
1426void (raise_exception_err)(int exception_index, int error_code)
1427{
1428 raise_interrupt(exception_index, 0, error_code, 0);
1429}
1430
1431void raise_exception(int exception_index)
1432{
1433 raise_interrupt(exception_index, 0, 0, 0);
1434}
1435
1436/* SMM support */
1437
1438#if defined(CONFIG_USER_ONLY)
1439
1440void do_smm_enter(void)
1441{
1442}
1443
1444void helper_rsm(void)
1445{
1446}
1447
1448#else
1449
1450#ifdef TARGET_X86_64
1451#define SMM_REVISION_ID 0x00020064
1452#else
1453#define SMM_REVISION_ID 0x00020000
1454#endif
1455
1456void do_smm_enter(void)
1457{
1458#ifdef VBOX
1459 cpu_abort(env, "do_ssm_enter");
1460#else /* !VBOX */
1461 target_ulong sm_state;
1462 SegmentCache *dt;
1463 int i, offset;
1464
1465 if (loglevel & CPU_LOG_INT) {
1466 fprintf(logfile, "SMM: enter\n");
1467 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1468 }
1469
1470 env->hflags |= HF_SMM_MASK;
1471 cpu_smm_update(env);
1472
1473 sm_state = env->smbase + 0x8000;
1474
1475#ifdef TARGET_X86_64
1476 for(i = 0; i < 6; i++) {
1477 dt = &env->segs[i];
1478 offset = 0x7e00 + i * 16;
1479 stw_phys(sm_state + offset, dt->selector);
1480 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1481 stl_phys(sm_state + offset + 4, dt->limit);
1482 stq_phys(sm_state + offset + 8, dt->base);
1483 }
1484
1485 stq_phys(sm_state + 0x7e68, env->gdt.base);
1486 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1487
1488 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1489 stq_phys(sm_state + 0x7e78, env->ldt.base);
1490 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1491 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1492
1493 stq_phys(sm_state + 0x7e88, env->idt.base);
1494 stl_phys(sm_state + 0x7e84, env->idt.limit);
1495
1496 stw_phys(sm_state + 0x7e90, env->tr.selector);
1497 stq_phys(sm_state + 0x7e98, env->tr.base);
1498 stl_phys(sm_state + 0x7e94, env->tr.limit);
1499 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1500
1501 stq_phys(sm_state + 0x7ed0, env->efer);
1502
1503 stq_phys(sm_state + 0x7ff8, EAX);
1504 stq_phys(sm_state + 0x7ff0, ECX);
1505 stq_phys(sm_state + 0x7fe8, EDX);
1506 stq_phys(sm_state + 0x7fe0, EBX);
1507 stq_phys(sm_state + 0x7fd8, ESP);
1508 stq_phys(sm_state + 0x7fd0, EBP);
1509 stq_phys(sm_state + 0x7fc8, ESI);
1510 stq_phys(sm_state + 0x7fc0, EDI);
1511 for(i = 8; i < 16; i++)
1512 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1513 stq_phys(sm_state + 0x7f78, env->eip);
1514 stl_phys(sm_state + 0x7f70, compute_eflags());
1515 stl_phys(sm_state + 0x7f68, env->dr[6]);
1516 stl_phys(sm_state + 0x7f60, env->dr[7]);
1517
1518 stl_phys(sm_state + 0x7f48, env->cr[4]);
1519 stl_phys(sm_state + 0x7f50, env->cr[3]);
1520 stl_phys(sm_state + 0x7f58, env->cr[0]);
1521
1522 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1523 stl_phys(sm_state + 0x7f00, env->smbase);
1524#else
1525 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1526 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1527 stl_phys(sm_state + 0x7ff4, compute_eflags());
1528 stl_phys(sm_state + 0x7ff0, env->eip);
1529 stl_phys(sm_state + 0x7fec, EDI);
1530 stl_phys(sm_state + 0x7fe8, ESI);
1531 stl_phys(sm_state + 0x7fe4, EBP);
1532 stl_phys(sm_state + 0x7fe0, ESP);
1533 stl_phys(sm_state + 0x7fdc, EBX);
1534 stl_phys(sm_state + 0x7fd8, EDX);
1535 stl_phys(sm_state + 0x7fd4, ECX);
1536 stl_phys(sm_state + 0x7fd0, EAX);
1537 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1538 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1539
1540 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1541 stl_phys(sm_state + 0x7f64, env->tr.base);
1542 stl_phys(sm_state + 0x7f60, env->tr.limit);
1543 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1544
1545 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1546 stl_phys(sm_state + 0x7f80, env->ldt.base);
1547 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1548 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1549
1550 stl_phys(sm_state + 0x7f74, env->gdt.base);
1551 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1552
1553 stl_phys(sm_state + 0x7f58, env->idt.base);
1554 stl_phys(sm_state + 0x7f54, env->idt.limit);
1555
1556 for(i = 0; i < 6; i++) {
1557 dt = &env->segs[i];
1558 if (i < 3)
1559 offset = 0x7f84 + i * 12;
1560 else
1561 offset = 0x7f2c + (i - 3) * 12;
1562 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1563 stl_phys(sm_state + offset + 8, dt->base);
1564 stl_phys(sm_state + offset + 4, dt->limit);
1565 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1566 }
1567 stl_phys(sm_state + 0x7f14, env->cr[4]);
1568
1569 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1570 stl_phys(sm_state + 0x7ef8, env->smbase);
1571#endif
1572 /* init SMM cpu state */
1573
1574#ifdef TARGET_X86_64
1575 env->efer = 0;
1576 env->hflags &= ~HF_LMA_MASK;
1577#endif
1578 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1579 env->eip = 0x00008000;
1580 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1581 0xffffffff, 0);
1582 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1583 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1584 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1585 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1586 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1587
1588 cpu_x86_update_cr0(env,
1589 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1590 cpu_x86_update_cr4(env, 0);
1591 env->dr[7] = 0x00000400;
1592 CC_OP = CC_OP_EFLAGS;
1593#endif /* VBOX */
1594}
1595
1596void helper_rsm(void)
1597{
1598#ifdef VBOX
1599 cpu_abort(env, "helper_rsm");
1600#else /* !VBOX */
1601 target_ulong sm_state;
1602 int i, offset;
1603 uint32_t val;
1604
1605 sm_state = env->smbase + 0x8000;
1606#ifdef TARGET_X86_64
1607 env->efer = ldq_phys(sm_state + 0x7ed0);
1608 if (env->efer & MSR_EFER_LMA)
1609 env->hflags |= HF_LMA_MASK;
1610 else
1611 env->hflags &= ~HF_LMA_MASK;
1612
1613 for(i = 0; i < 6; i++) {
1614 offset = 0x7e00 + i * 16;
1615 cpu_x86_load_seg_cache(env, i,
1616 lduw_phys(sm_state + offset),
1617 ldq_phys(sm_state + offset + 8),
1618 ldl_phys(sm_state + offset + 4),
1619 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1620 }
1621
1622 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1623 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1624
1625 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1626 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1627 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1628 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1629
1630 env->idt.base = ldq_phys(sm_state + 0x7e88);
1631 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1632
1633 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1634 env->tr.base = ldq_phys(sm_state + 0x7e98);
1635 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1636 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1637
1638 EAX = ldq_phys(sm_state + 0x7ff8);
1639 ECX = ldq_phys(sm_state + 0x7ff0);
1640 EDX = ldq_phys(sm_state + 0x7fe8);
1641 EBX = ldq_phys(sm_state + 0x7fe0);
1642 ESP = ldq_phys(sm_state + 0x7fd8);
1643 EBP = ldq_phys(sm_state + 0x7fd0);
1644 ESI = ldq_phys(sm_state + 0x7fc8);
1645 EDI = ldq_phys(sm_state + 0x7fc0);
1646 for(i = 8; i < 16; i++)
1647 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1648 env->eip = ldq_phys(sm_state + 0x7f78);
1649 load_eflags(ldl_phys(sm_state + 0x7f70),
1650 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1651 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1652 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1653
1654 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1655 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1656 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1657
1658 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1659 if (val & 0x20000) {
1660 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1661 }
1662#else
1663 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1664 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1665 load_eflags(ldl_phys(sm_state + 0x7ff4),
1666 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1667 env->eip = ldl_phys(sm_state + 0x7ff0);
1668 EDI = ldl_phys(sm_state + 0x7fec);
1669 ESI = ldl_phys(sm_state + 0x7fe8);
1670 EBP = ldl_phys(sm_state + 0x7fe4);
1671 ESP = ldl_phys(sm_state + 0x7fe0);
1672 EBX = ldl_phys(sm_state + 0x7fdc);
1673 EDX = ldl_phys(sm_state + 0x7fd8);
1674 ECX = ldl_phys(sm_state + 0x7fd4);
1675 EAX = ldl_phys(sm_state + 0x7fd0);
1676 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1677 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1678
1679 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1680 env->tr.base = ldl_phys(sm_state + 0x7f64);
1681 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1682 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1683
1684 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1685 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1686 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1687 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1688
1689 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1690 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1691
1692 env->idt.base = ldl_phys(sm_state + 0x7f58);
1693 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1694
1695 for(i = 0; i < 6; i++) {
1696 if (i < 3)
1697 offset = 0x7f84 + i * 12;
1698 else
1699 offset = 0x7f2c + (i - 3) * 12;
1700 cpu_x86_load_seg_cache(env, i,
1701 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1702 ldl_phys(sm_state + offset + 8),
1703 ldl_phys(sm_state + offset + 4),
1704 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1705 }
1706 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1707
1708 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1709 if (val & 0x20000) {
1710 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1711 }
1712#endif
1713 CC_OP = CC_OP_EFLAGS;
1714 env->hflags &= ~HF_SMM_MASK;
1715 cpu_smm_update(env);
1716
1717 if (loglevel & CPU_LOG_INT) {
1718 fprintf(logfile, "SMM: after RSM\n");
1719 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1720 }
1721#endif /* !VBOX */
1722}
1723
1724#endif /* !CONFIG_USER_ONLY */
1725
1726
1727#ifdef BUGGY_GCC_DIV64
1728/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1729 call it from another function */
1730uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1731{
1732 *q_ptr = num / den;
1733 return num % den;
1734}
1735
1736int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1737{
1738 *q_ptr = num / den;
1739 return num % den;
1740}
1741#endif
1742
1743void helper_divl_EAX_T0(void)
1744{
1745 unsigned int den, r;
1746 uint64_t num, q;
1747
1748 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1749 den = T0;
1750 if (den == 0) {
1751 raise_exception(EXCP00_DIVZ);
1752 }
1753#ifdef BUGGY_GCC_DIV64
1754 r = div32(&q, num, den);
1755#else
1756 q = (num / den);
1757 r = (num % den);
1758#endif
1759 if (q > 0xffffffff)
1760 raise_exception(EXCP00_DIVZ);
1761 EAX = (uint32_t)q;
1762 EDX = (uint32_t)r;
1763}
1764
1765void helper_idivl_EAX_T0(void)
1766{
1767 int den, r;
1768 int64_t num, q;
1769
1770 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1771 den = T0;
1772 if (den == 0) {
1773 raise_exception(EXCP00_DIVZ);
1774 }
1775#ifdef BUGGY_GCC_DIV64
1776 r = idiv32(&q, num, den);
1777#else
1778 q = (num / den);
1779 r = (num % den);
1780#endif
1781 if (q != (int32_t)q)
1782 raise_exception(EXCP00_DIVZ);
1783 EAX = (uint32_t)q;
1784 EDX = (uint32_t)r;
1785}
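/* Illustrative note (not part of the original file): the q > 0xffffffff
   and q != (int32_t)q checks above implement the architectural divide
   overflow, which x86 reports with the same #DE (vector 0) as division by
   zero, hence EXCP00_DIVZ in both places. E.g. EDX:EAX = 1:0 (i.e. 2^32)
   divided by 1 does not fit in 32 bits and must fault rather than
   truncate. */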
1786
1787void helper_cmpxchg8b(void)
1788{
1789 uint64_t d;
1790 int eflags;
1791
1792 eflags = cc_table[CC_OP].compute_all();
1793 d = ldq(A0);
1794 if (d == (((uint64_t)EDX << 32) | EAX)) {
1795 stq(A0, ((uint64_t)ECX << 32) | EBX);
1796 eflags |= CC_Z;
1797 } else {
1798 /* always do the store */
1799 stq(A0, d);
1800 EDX = (uint32_t)(d >> 32);
1801 EAX = (uint32_t)d;
1802 eflags &= ~CC_Z;
1803 }
1804 CC_SRC = eflags;
1805}
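/* Illustrative sketch (not part of the original file): the helper above is
   the architectural 64-bit compare-and-swap, roughly
       if (mem64 == EDX:EAX) { mem64 = ECX:EBX; ZF = 1; }
       else                  { EDX:EAX = mem64; ZF = 0; }
   The store on the failure path mirrors hardware, which always drives a
   write cycle (relevant under the LOCK prefix). */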
1806
1807void helper_single_step()
1808{
1809 env->dr[6] |= 0x4000;
1810 raise_exception(EXCP01_SSTP);
1811}
1812
1813void helper_cpuid(void)
1814{
1815#ifndef VBOX
1816 uint32_t index;
1817 index = (uint32_t)EAX;
1818
1819 /* test if maximum index reached */
1820 if (index & 0x80000000) {
1821 if (index > env->cpuid_xlevel)
1822 index = env->cpuid_level;
1823 } else {
1824 if (index > env->cpuid_level)
1825 index = env->cpuid_level;
1826 }
1827
1828 switch(index) {
1829 case 0:
1830 EAX = env->cpuid_level;
1831 EBX = env->cpuid_vendor1;
1832 EDX = env->cpuid_vendor2;
1833 ECX = env->cpuid_vendor3;
1834 break;
1835 case 1:
1836 EAX = env->cpuid_version;
1837 EBX = 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1838 ECX = env->cpuid_ext_features;
1839 EDX = env->cpuid_features;
1840 break;
1841 case 2:
1842 /* cache info: needed for Pentium Pro compatibility */
1843 EAX = 0x410601;
1844 EBX = 0;
1845 ECX = 0;
1846 EDX = 0;
1847 break;
1848 case 0x80000000:
1849 EAX = env->cpuid_xlevel;
1850 EBX = env->cpuid_vendor1;
1851 EDX = env->cpuid_vendor2;
1852 ECX = env->cpuid_vendor3;
1853 break;
1854 case 0x80000001:
1855 EAX = env->cpuid_features;
1856 EBX = 0;
1857 ECX = 0;
1858 EDX = env->cpuid_ext2_features;
1859 break;
1860 case 0x80000002:
1861 case 0x80000003:
1862 case 0x80000004:
1863 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1864 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1865 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1866 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1867 break;
1868 case 0x80000005:
1869 /* cache info (L1 cache) */
1870 EAX = 0x01ff01ff;
1871 EBX = 0x01ff01ff;
1872 ECX = 0x40020140;
1873 EDX = 0x40020140;
1874 break;
1875 case 0x80000006:
1876 /* cache info (L2 cache) */
1877 EAX = 0;
1878 EBX = 0x42004200;
1879 ECX = 0x02008140;
1880 EDX = 0;
1881 break;
1882 case 0x80000008:
1883 /* virtual & phys address size in low 2 bytes. */
1884 EAX = 0x00003028;
1885 EBX = 0;
1886 ECX = 0;
1887 EDX = 0;
1888 break;
1889 default:
1890 /* reserved values: zero */
1891 EAX = 0;
1892 EBX = 0;
1893 ECX = 0;
1894 EDX = 0;
1895 break;
1896 }
1897#else /* VBOX */
1898 remR3CpuId(env, EAX, &EAX, &EBX, &ECX, &EDX);
1899#endif /* VBOX */
1900}
1901
1902void helper_enter_level(int level, int data32)
1903{
1904 target_ulong ssp;
1905 uint32_t esp_mask, esp, ebp;
1906
1907 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1908 ssp = env->segs[R_SS].base;
1909 ebp = EBP;
1910 esp = ESP;
1911 if (data32) {
1912 /* 32 bit */
1913 esp -= 4;
1914 while (--level) {
1915 esp -= 4;
1916 ebp -= 4;
1917 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1918 }
1919 esp -= 4;
1920 stl(ssp + (esp & esp_mask), T1);
1921 } else {
1922 /* 16 bit */
1923 esp -= 2;
1924 while (--level) {
1925 esp -= 2;
1926 ebp -= 2;
1927 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1928 }
1929 esp -= 2;
1930 stw(ssp + (esp & esp_mask), T1);
1931 }
1932}
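
/* Editor's note: an illustrative model (hypothetical, not used by the
 * recompiler) of the 32-bit ENTER nesting loop above, acting on a plain
 * array instead of guest memory; esp/ebp are byte offsets into 'stack'.
 * For level >= 1 it skips the slot of the EBP pushed by the translated
 * code (the helper never writes it), copies level-1 enclosing frame
 * pointers, then pushes the new frame pointer passed in T1. */
#if 0
# include <stdint.h>
static uint32_t enter32_model(uint32_t *stack, uint32_t esp, uint32_t ebp,
                              uint32_t new_ebp, int level)
{
    esp -= 4;                            /* saved EBP slot, already written */
    while (--level) {
        esp -= 4;
        ebp -= 4;
        stack[esp / 4] = stack[ebp / 4]; /* copy one enclosing frame pointer */
    }
    esp -= 4;
    stack[esp / 4] = new_ebp;            /* push the new frame pointer (T1) */
    return esp;
}
#endif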
1933
1934#ifdef TARGET_X86_64
1935void helper_enter64_level(int level, int data64)
1936{
1937 target_ulong esp, ebp;
1938 ebp = EBP;
1939 esp = ESP;
1940
1941 if (data64) {
1942 /* 64 bit */
1943 esp -= 8;
1944 while (--level) {
1945 esp -= 8;
1946 ebp -= 8;
1947 stq(esp, ldq(ebp));
1948 }
1949 esp -= 8;
1950 stq(esp, T1);
1951 } else {
1952 /* 16 bit */
1953 esp -= 2;
1954 while (--level) {
1955 esp -= 2;
1956 ebp -= 2;
1957 stw(esp, lduw(ebp));
1958 }
1959 esp -= 2;
1960 stw(esp, T1);
1961 }
1962}
1963#endif
1964
1965void helper_lldt_T0(void)
1966{
1967 int selector;
1968 SegmentCache *dt;
1969 uint32_t e1, e2;
1970 int index, entry_limit;
1971 target_ulong ptr;
1972#ifdef VBOX
1973 Log(("helper_lldt_T0: old ldtr=%RTsel {.base=%VGv, .limit=%VGv} new=%RTsel\n",
1974 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit, (RTSEL)(T0 & 0xffff)));
1975#endif
1976
1977 selector = T0 & 0xffff;
1978 if ((selector & 0xfffc) == 0) {
1979 /* XXX: NULL selector case: invalid LDT */
1980 env->ldt.base = 0;
1981 env->ldt.limit = 0;
1982 } else {
1983 if (selector & 0x4)
1984 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1985 dt = &env->gdt;
1986 index = selector & ~7;
1987#ifdef TARGET_X86_64
1988 if (env->hflags & HF_LMA_MASK)
1989 entry_limit = 15;
1990 else
1991#endif
1992 entry_limit = 7;
1993 if ((index + entry_limit) > dt->limit)
1994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1995 ptr = dt->base + index;
1996 e1 = ldl_kernel(ptr);
1997 e2 = ldl_kernel(ptr + 4);
1998 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2000 if (!(e2 & DESC_P_MASK))
2001 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2002#ifdef TARGET_X86_64
2003 if (env->hflags & HF_LMA_MASK) {
2004 uint32_t e3;
2005 e3 = ldl_kernel(ptr + 8);
2006 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2007 env->ldt.base |= (target_ulong)e3 << 32;
2008 } else
2009#endif
2010 {
2011 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2012 }
2013 }
2014 env->ldt.selector = selector;
2015#ifdef VBOX
2016 Log(("helper_lldt_T0: new ldtr=%RTsel {.base=%VGv, .limit=%VGv}\n",
2017 (RTSEL)env->ldt.selector, (RTGCPTR)env->ldt.base, (RTGCPTR)env->ldt.limit));
2018#endif
2019}
2020
2021void helper_ltr_T0(void)
2022{
2023 int selector;
2024 SegmentCache *dt;
2025 uint32_t e1, e2;
2026 int index, type, entry_limit;
2027 target_ulong ptr;
2028
2029#ifdef VBOX
2030 Log(("helper_ltr_T0: old tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2031 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2032 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2033#endif
2034
2035 selector = T0 & 0xffff;
2036 if ((selector & 0xfffc) == 0) {
2037 /* NULL selector case: invalid TR */
2038 env->tr.base = 0;
2039 env->tr.limit = 0;
2040 env->tr.flags = 0;
2041 } else {
2042 if (selector & 0x4)
2043 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2044 dt = &env->gdt;
2045 index = selector & ~7;
2046#ifdef TARGET_X86_64
2047 if (env->hflags & HF_LMA_MASK)
2048 entry_limit = 15;
2049 else
2050#endif
2051 entry_limit = 7;
2052 if ((index + entry_limit) > dt->limit)
2053 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2054 ptr = dt->base + index;
2055 e1 = ldl_kernel(ptr);
2056 e2 = ldl_kernel(ptr + 4);
2057 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2058 if ((e2 & DESC_S_MASK) ||
2059 (type != 1 && type != 9))
2060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2061 if (!(e2 & DESC_P_MASK))
2062 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2063#ifdef TARGET_X86_64
2064 if (env->hflags & HF_LMA_MASK) {
2065 uint32_t e3;
2066 e3 = ldl_kernel(ptr + 8);
2067 load_seg_cache_raw_dt(&env->tr, e1, e2);
2068 env->tr.base |= (target_ulong)e3 << 32;
2069 } else
2070#endif
2071 {
2072 load_seg_cache_raw_dt(&env->tr, e1, e2);
2073 }
2074 e2 |= DESC_TSS_BUSY_MASK;
2075 stl_kernel(ptr + 4, e2);
2076 }
2077 env->tr.selector = selector;
2078#ifdef VBOX
2079 Log(("helper_ltr_T0: new tr=%RTsel {.base=%VGv, .limit=%VGv, .flags=%RX32} new=%RTsel\n",
2080 (RTSEL)env->tr.selector, (RTGCPTR)env->tr.base, (RTGCPTR)env->tr.limit,
2081 env->tr.flags, (RTSEL)(T0 & 0xffff)));
2082#endif
2083}
2084
2085/* only works in protected mode, not VM86. seg_reg must be != R_CS */
2086void load_seg(int seg_reg, int selector)
2087{
2088 uint32_t e1, e2;
2089 int cpl, dpl, rpl;
2090 SegmentCache *dt;
2091 int index;
2092 target_ulong ptr;
2093
2094 selector &= 0xffff;
2095 cpl = env->hflags & HF_CPL_MASK;
2096
2097#ifdef VBOX
2098 /* Trying to load a selector with RPL=1 while running in raw ring 0? */
2099 if (cpl == 0 && (selector & 3) == 1 && (env->state & CPU_RAW_RING0))
2100 {
2101 Log(("RPL 1 -> sel %04X -> %04X\n", selector, selector & 0xfffc));
2102 selector = selector & 0xfffc;
2103 }
2104#endif
2105
2106 if ((selector & 0xfffc) == 0) {
2107 /* null selector case */
2108 if (seg_reg == R_SS
2109#ifdef TARGET_X86_64
2110 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2111#endif
2112 )
2113 raise_exception_err(EXCP0D_GPF, 0);
2114 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2115 } else {
2116
2117 if (selector & 0x4)
2118 dt = &env->ldt;
2119 else
2120 dt = &env->gdt;
2121 index = selector & ~7;
2122 if ((index + 7) > dt->limit)
2123 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2124 ptr = dt->base + index;
2125 e1 = ldl_kernel(ptr);
2126 e2 = ldl_kernel(ptr + 4);
2127
2128 if (!(e2 & DESC_S_MASK))
2129 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 rpl = selector & 3;
2131 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2132 if (seg_reg == R_SS) {
2133 /* must be writable segment */
2134 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2135 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2136 if (rpl != cpl || dpl != cpl)
2137 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2138 } else {
2139 /* must be readable segment */
2140 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2141 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2142
2143 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2144 /* if not conforming code, test rights */
2145 if (dpl < cpl || dpl < rpl)
2146 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2147 }
2148 }
2149
2150 if (!(e2 & DESC_P_MASK)) {
2151 if (seg_reg == R_SS)
2152 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2153 else
2154 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2155 }
2156
2157 /* set the access bit if not already set */
2158 if (!(e2 & DESC_A_MASK)) {
2159 e2 |= DESC_A_MASK;
2160 stl_kernel(ptr + 4, e2);
2161 }
2162
2163 cpu_x86_load_seg_cache(env, seg_reg, selector,
2164 get_seg_base(e1, e2),
2165 get_seg_limit(e1, e2),
2166 e2);
2167#if 0
2168 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2169 selector, (unsigned long)env->segs[seg_reg].base, (unsigned long)env->segs[seg_reg].limit, env->segs[seg_reg].flags);
2170#endif
2171 }
2172}
2173
2174/* protected mode jump */
2175void helper_ljmp_protected_T0_T1(int next_eip_addend)
2176{
2177 int new_cs, gate_cs, type;
2178 uint32_t e1, e2, cpl, dpl, rpl, limit;
2179 target_ulong new_eip, next_eip;
2180
2181 new_cs = T0;
2182 new_eip = T1;
2183 if ((new_cs & 0xfffc) == 0)
2184 raise_exception_err(EXCP0D_GPF, 0);
2185 if (load_segment(&e1, &e2, new_cs) != 0)
2186 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2187 cpl = env->hflags & HF_CPL_MASK;
2188 if (e2 & DESC_S_MASK) {
2189 if (!(e2 & DESC_CS_MASK))
2190 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2191 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2192 if (e2 & DESC_C_MASK) {
2193 /* conforming code segment */
2194 if (dpl > cpl)
2195 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2196 } else {
2197 /* non conforming code segment */
2198 rpl = new_cs & 3;
2199 if (rpl > cpl)
2200 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2201 if (dpl != cpl)
2202 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2203 }
2204 if (!(e2 & DESC_P_MASK))
2205 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2206 limit = get_seg_limit(e1, e2);
2207 if (new_eip > limit &&
2208 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2209 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2210 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2211 get_seg_base(e1, e2), limit, e2);
2212 EIP = new_eip;
2213 } else {
2214 /* jump to call or task gate */
2215 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2216 rpl = new_cs & 3;
2217 cpl = env->hflags & HF_CPL_MASK;
2218 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2219 switch(type) {
2220 case 1: /* 286 TSS */
2221 case 9: /* 386 TSS */
2222 case 5: /* task gate */
2223 if (dpl < cpl || dpl < rpl)
2224 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2225 next_eip = env->eip + next_eip_addend;
2226 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2227 CC_OP = CC_OP_EFLAGS;
2228 break;
2229 case 4: /* 286 call gate */
2230 case 12: /* 386 call gate */
2231 if ((dpl < cpl) || (dpl < rpl))
2232 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2233 if (!(e2 & DESC_P_MASK))
2234 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2235 gate_cs = e1 >> 16;
2236 new_eip = (e1 & 0xffff);
2237 if (type == 12)
2238 new_eip |= (e2 & 0xffff0000);
2239 if (load_segment(&e1, &e2, gate_cs) != 0)
2240 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2241 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2242 /* must be code segment */
2243 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2244 (DESC_S_MASK | DESC_CS_MASK)))
2245 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2246 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2247 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2248 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2249 if (!(e2 & DESC_P_MASK))
2250#ifdef VBOX /* See page 3-514 of 253666.pdf */
2251 raise_exception_err(EXCP0B_NOSEG, gate_cs & 0xfffc);
2252#else
2253 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2254#endif
2255 limit = get_seg_limit(e1, e2);
2256 if (new_eip > limit)
2257 raise_exception_err(EXCP0D_GPF, 0);
2258 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2259 get_seg_base(e1, e2), limit, e2);
2260 EIP = new_eip;
2261 break;
2262 default:
2263 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2264 break;
2265 }
2266 }
2267}
2268
2269/* real mode call */
2270void helper_lcall_real_T0_T1(int shift, int next_eip)
2271{
2272 int new_cs, new_eip;
2273 uint32_t esp, esp_mask;
2274 target_ulong ssp;
2275
2276 new_cs = T0;
2277 new_eip = T1;
2278 esp = ESP;
2279 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2280 ssp = env->segs[R_SS].base;
2281 if (shift) {
2282 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2283 PUSHL(ssp, esp, esp_mask, next_eip);
2284 } else {
2285 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2286 PUSHW(ssp, esp, esp_mask, next_eip);
2287 }
2288
2289 SET_ESP(esp, esp_mask);
2290 env->eip = new_eip;
2291 env->segs[R_CS].selector = new_cs;
2292 env->segs[R_CS].base = (new_cs << 4);
2293}
2294
2295/* protected mode call */
2296void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2297{
2298 int new_cs, new_stack, i;
2299 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2300 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2301 uint32_t val, limit, old_sp_mask;
2302 target_ulong ssp, old_ssp, next_eip, new_eip;
2303
2304 new_cs = T0;
2305 new_eip = T1;
2306 next_eip = env->eip + next_eip_addend;
2307#ifdef DEBUG_PCALL
2308 if (loglevel & CPU_LOG_PCALL) {
2309 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2310 new_cs, (uint32_t)new_eip, shift);
2311 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2312 }
2313#endif
2314 if ((new_cs & 0xfffc) == 0)
2315 raise_exception_err(EXCP0D_GPF, 0);
2316 if (load_segment(&e1, &e2, new_cs) != 0)
2317 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2318 cpl = env->hflags & HF_CPL_MASK;
2319#ifdef DEBUG_PCALL
2320 if (loglevel & CPU_LOG_PCALL) {
2321 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2322 }
2323#endif
2324 if (e2 & DESC_S_MASK) {
2325 if (!(e2 & DESC_CS_MASK))
2326 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2327 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2328 if (e2 & DESC_C_MASK) {
2329 /* conforming code segment */
2330 if (dpl > cpl)
2331 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2332 } else {
2333 /* non conforming code segment */
2334 rpl = new_cs & 3;
2335 if (rpl > cpl)
2336 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2337 if (dpl != cpl)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 }
2340 if (!(e2 & DESC_P_MASK))
2341 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2342
2343#ifdef TARGET_X86_64
2344 /* XXX: check 16/32 bit cases in long mode */
2345 if (shift == 2) {
2346 target_ulong rsp;
2347 /* 64 bit case */
2348 rsp = ESP;
2349 PUSHQ(rsp, env->segs[R_CS].selector);
2350 PUSHQ(rsp, next_eip);
2351 /* from this point, not restartable */
2352 ESP = rsp;
2353 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2354 get_seg_base(e1, e2),
2355 get_seg_limit(e1, e2), e2);
2356 EIP = new_eip;
2357 } else
2358#endif
2359 {
2360 sp = ESP;
2361 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2362 ssp = env->segs[R_SS].base;
2363 if (shift) {
2364 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2365 PUSHL(ssp, sp, sp_mask, next_eip);
2366 } else {
2367 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2368 PUSHW(ssp, sp, sp_mask, next_eip);
2369 }
2370
2371 limit = get_seg_limit(e1, e2);
2372 if (new_eip > limit)
2373 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2374 /* from this point, not restartable */
2375 SET_ESP(sp, sp_mask);
2376 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2377 get_seg_base(e1, e2), limit, e2);
2378 EIP = new_eip;
2379 }
2380 } else {
2381 /* check gate type */
2382 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2383 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2384 rpl = new_cs & 3;
2385 switch(type) {
2386 case 1: /* available 286 TSS */
2387 case 9: /* available 386 TSS */
2388 case 5: /* task gate */
2389 if (dpl < cpl || dpl < rpl)
2390 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2391 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2392 CC_OP = CC_OP_EFLAGS;
2393 return;
2394 case 4: /* 286 call gate */
2395 case 12: /* 386 call gate */
2396 break;
2397 default:
2398 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2399 break;
2400 }
2401 shift = type >> 3;
2402
2403 if (dpl < cpl || dpl < rpl)
2404 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2405 /* check valid bit */
2406 if (!(e2 & DESC_P_MASK))
2407 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2408 selector = e1 >> 16;
2409 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2410 param_count = e2 & 0x1f;
2411 if ((selector & 0xfffc) == 0)
2412 raise_exception_err(EXCP0D_GPF, 0);
2413
2414 if (load_segment(&e1, &e2, selector) != 0)
2415 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2416 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2417 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2418 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2419 if (dpl > cpl)
2420 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2421 if (!(e2 & DESC_P_MASK))
2422 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2423
2424 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2425 /* to inner privilege */
2426 get_ss_esp_from_tss(&ss, &sp, dpl);
2427#ifdef DEBUG_PCALL
2428 if (loglevel & CPU_LOG_PCALL)
2429 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2430 ss, sp, param_count, ESP);
2431#endif
2432 if ((ss & 0xfffc) == 0)
2433 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2434 if ((ss & 3) != dpl)
2435 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2436 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2437 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2438 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2439 if (ss_dpl != dpl)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if (!(ss_e2 & DESC_S_MASK) ||
2442 (ss_e2 & DESC_CS_MASK) ||
2443 !(ss_e2 & DESC_W_MASK))
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 if (!(ss_e2 & DESC_P_MASK))
2446#ifdef VBOX /* See page 3-99 of 253666.pdf */
2447 raise_exception_err(EXCP0C_STACK, ss & 0xfffc);
2448#else
2449 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2450#endif
2451
2452 // push_size = ((param_count * 2) + 8) << shift;
2453
2454 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2455 old_ssp = env->segs[R_SS].base;
2456
2457 sp_mask = get_sp_mask(ss_e2);
2458 ssp = get_seg_base(ss_e1, ss_e2);
2459 if (shift) {
2460 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2461 PUSHL(ssp, sp, sp_mask, ESP);
2462 for(i = param_count - 1; i >= 0; i--) {
2463 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2464 PUSHL(ssp, sp, sp_mask, val);
2465 }
2466 } else {
2467 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2468 PUSHW(ssp, sp, sp_mask, ESP);
2469 for(i = param_count - 1; i >= 0; i--) {
2470 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2471 PUSHW(ssp, sp, sp_mask, val);
2472 }
2473 }
2474 new_stack = 1;
2475 } else {
2476 /* to same privilege */
2477 sp = ESP;
2478 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2479 ssp = env->segs[R_SS].base;
2480 // push_size = (4 << shift);
2481 new_stack = 0;
2482 }
2483
2484 if (shift) {
2485 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2486 PUSHL(ssp, sp, sp_mask, next_eip);
2487 } else {
2488 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2489 PUSHW(ssp, sp, sp_mask, next_eip);
2490 }
2491
2492 /* from this point, not restartable */
2493
2494 if (new_stack) {
2495 ss = (ss & ~3) | dpl;
2496 cpu_x86_load_seg_cache(env, R_SS, ss,
2497 ssp,
2498 get_seg_limit(ss_e1, ss_e2),
2499 ss_e2);
2500 }
2501
2502 selector = (selector & ~3) | dpl;
2503 cpu_x86_load_seg_cache(env, R_CS, selector,
2504 get_seg_base(e1, e2),
2505 get_seg_limit(e1, e2),
2506 e2);
2507 cpu_x86_set_cpl(env, dpl);
2508 SET_ESP(sp, sp_mask);
2509 EIP = offset;
2510 }
2511#ifdef USE_KQEMU
2512 if (kqemu_is_ok(env)) {
2513 env->exception_index = -1;
2514 cpu_loop_exit();
2515 }
2516#endif
2517}
2518
2519/* real and vm86 mode iret */
2520void helper_iret_real(int shift)
2521{
2522 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2523 target_ulong ssp;
2524 int eflags_mask;
2525#ifdef VBOX
2526 bool fVME = false;
2527
2528 remR3TrapClear(env->pVM);
2529#endif /* VBOX */
2530
2531 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2532 sp = ESP;
2533 ssp = env->segs[R_SS].base;
2534 if (shift == 1) {
2535 /* 32 bits */
2536 POPL(ssp, sp, sp_mask, new_eip);
2537 POPL(ssp, sp, sp_mask, new_cs);
2538 new_cs &= 0xffff;
2539 POPL(ssp, sp, sp_mask, new_eflags);
2540 } else {
2541 /* 16 bits */
2542 POPW(ssp, sp, sp_mask, new_eip);
2543 POPW(ssp, sp, sp_mask, new_cs);
2544 POPW(ssp, sp, sp_mask, new_eflags);
2545 }
2546#ifdef VBOX
2547 if ( (env->eflags & VM_MASK)
2548 && ((env->eflags >> IOPL_SHIFT) & 3) != 3
2549 && (env->cr[4] & CR4_VME_MASK)) /* implied or else we would fault earlier */
2550 {
2551 fVME = true;
2552 /* if virtual interrupt pending and (virtual) interrupts will be enabled -> #GP */
2553 /* if TF will be set -> #GP */
2554 if ( ((new_eflags & IF_MASK) && (env->eflags & VIP_MASK))
2555 || (new_eflags & TF_MASK))
2556 raise_exception(EXCP0D_GPF);
2557 }
2558#endif /* VBOX */
2559
2560 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2561 load_seg_vm(R_CS, new_cs);
2562 env->eip = new_eip;
2563#ifdef VBOX
2564 if (fVME)
2565 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2566 else
2567#endif
2568 if (env->eflags & VM_MASK)
2569 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2570 else
2571 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2572 if (shift == 0)
2573 eflags_mask &= 0xffff;
2574 load_eflags(new_eflags, eflags_mask);
2575
2576#ifdef VBOX
2577 if (fVME)
2578 {
2579 if (new_eflags & IF_MASK)
2580 env->eflags |= VIF_MASK;
2581 else
2582 env->eflags &= ~VIF_MASK;
2583 }
2584#endif /* VBOX */
2585}
2586
2587static inline void validate_seg(int seg_reg, int cpl)
2588{
2589 int dpl;
2590 uint32_t e2;
2591
2592 /* XXX: on x86_64, we do not want to nullify FS and GS because
2593 they may still contain a valid base. I would be interested to
2594 know how a real x86_64 CPU behaves */
2595 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2596 (env->segs[seg_reg].selector & 0xfffc) == 0)
2597 return;
2598
2599 e2 = env->segs[seg_reg].flags;
2600 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2601 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2602 /* data or non conforming code segment */
2603 if (dpl < cpl) {
2604 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2605 }
2606 }
2607}
2608
2609/* protected mode iret */
2610static inline void helper_ret_protected(int shift, int is_iret, int addend)
2611{
2612 uint32_t new_cs, new_eflags, new_ss;
2613 uint32_t new_es, new_ds, new_fs, new_gs;
2614 uint32_t e1, e2, ss_e1, ss_e2;
2615 int cpl, dpl, rpl, eflags_mask, iopl;
2616 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2617
2618#ifdef TARGET_X86_64
2619 if (shift == 2)
2620 sp_mask = -1;
2621 else
2622#endif
2623 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2624 sp = ESP;
2625 ssp = env->segs[R_SS].base;
2626 new_eflags = 0; /* avoid warning */
2627#ifdef TARGET_X86_64
2628 if (shift == 2) {
2629 POPQ(sp, new_eip);
2630 POPQ(sp, new_cs);
2631 new_cs &= 0xffff;
2632 if (is_iret) {
2633 POPQ(sp, new_eflags);
2634 }
2635 } else
2636#endif
2637 if (shift == 1) {
2638 /* 32 bits */
2639 POPL(ssp, sp, sp_mask, new_eip);
2640 POPL(ssp, sp, sp_mask, new_cs);
2641 new_cs &= 0xffff;
2642 if (is_iret) {
2643 POPL(ssp, sp, sp_mask, new_eflags);
2644#if defined(VBOX) && defined(DEBUG)
2645 printf("iret: new CS %04X\n", new_cs);
2646 printf("iret: new EIP %08X\n", new_eip);
2647 printf("iret: new EFLAGS %08X\n", new_eflags);
2648 printf("iret: EAX=%08x\n", EAX);
2649#endif
2650
2651 if (new_eflags & VM_MASK)
2652 goto return_to_vm86;
2653 }
2654#ifdef VBOX
2655 if ((new_cs & 0x3) == 1 && (env->state & CPU_RAW_RING0))
2656 {
2657#ifdef DEBUG
2658 printf("RPL 1 -> new_cs %04X -> %04X\n", new_cs, new_cs & 0xfffc);
2659#endif
2660 new_cs = new_cs & 0xfffc;
2661 }
2662#endif
2663 } else {
2664 /* 16 bits */
2665 POPW(ssp, sp, sp_mask, new_eip);
2666 POPW(ssp, sp, sp_mask, new_cs);
2667 if (is_iret)
2668 POPW(ssp, sp, sp_mask, new_eflags);
2669 }
2670#ifdef DEBUG_PCALL
2671 if (loglevel & CPU_LOG_PCALL) {
2672 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2673 new_cs, new_eip, shift, addend);
2674 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2675 }
2676#endif
2677 if ((new_cs & 0xfffc) == 0)
2678 {
2679#if defined(VBOX) && defined(DEBUG)
2680 printf("new_cs & 0xfffc) == 0\n");
2681#endif
2682 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2683 }
2684 if (load_segment(&e1, &e2, new_cs) != 0)
2685 {
2686#if defined(VBOX) && defined(DEBUG)
2687 printf("load_segment failed\n");
2688#endif
2689 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690 }
2691 if (!(e2 & DESC_S_MASK) ||
2692 !(e2 & DESC_CS_MASK))
2693 {
2694#if defined(VBOX) && defined(DEBUG)
2695 printf("e2 mask %08x\n", e2);
2696#endif
2697 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2698 }
2699 cpl = env->hflags & HF_CPL_MASK;
2700 rpl = new_cs & 3;
2701 if (rpl < cpl)
2702 {
2703#if defined(VBOX) && defined(DEBUG)
2704 printf("rpl < cpl (%d vs %d)\n", rpl, cpl);
2705#endif
2706 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2707 }
2708 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2709 if (e2 & DESC_C_MASK) {
2710 if (dpl > rpl)
2711 {
2712#if defined(VBOX) && defined(DEBUG)
2713 printf("dpl > rpl (%d vs %d)\n", dpl, rpl);
2714#endif
2715 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2716 }
2717 } else {
2718 if (dpl != rpl)
2719 {
2720#if defined(VBOX) && defined(DEBUG)
2721 printf("dpl != rpl (%d vs %d) e1=%x e2=%x\n", dpl, rpl, e1, e2);
2722#endif
2723 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2724 }
2725 }
2726 if (!(e2 & DESC_P_MASK))
2727 {
2728#if defined(VBOX) && defined(DEBUG)
2729 printf("DESC_P_MASK e2=%08x\n", e2);
2730#endif
2731 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2732 }
2733 sp += addend;
2734 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2735 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2736 /* return to same privilege level */
2737 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2738 get_seg_base(e1, e2),
2739 get_seg_limit(e1, e2),
2740 e2);
2741 } else {
2742 /* return to different privilege level */
2743#ifdef TARGET_X86_64
2744 if (shift == 2) {
2745 POPQ(sp, new_esp);
2746 POPQ(sp, new_ss);
2747 new_ss &= 0xffff;
2748 } else
2749#endif
2750 if (shift == 1) {
2751 /* 32 bits */
2752 POPL(ssp, sp, sp_mask, new_esp);
2753 POPL(ssp, sp, sp_mask, new_ss);
2754 new_ss &= 0xffff;
2755 } else {
2756 /* 16 bits */
2757 POPW(ssp, sp, sp_mask, new_esp);
2758 POPW(ssp, sp, sp_mask, new_ss);
2759 }
2760#ifdef DEBUG_PCALL
2761 if (loglevel & CPU_LOG_PCALL) {
2762 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2763 new_ss, new_esp);
2764 }
2765#endif
2766 if ((new_ss & 0xfffc) == 0) {
2767#ifdef TARGET_X86_64
2768 /* NULL ss is allowed in long mode if cpl != 3 */
2769 /* XXX: test CS64 ? */
2770 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2771 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2772 0, 0xffffffff,
2773 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2774 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2775 DESC_W_MASK | DESC_A_MASK);
2776 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2777 } else
2778#endif
2779 {
2780 raise_exception_err(EXCP0D_GPF, 0);
2781 }
2782 } else {
2783 if ((new_ss & 3) != rpl)
2784 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2785 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2786 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2787 if (!(ss_e2 & DESC_S_MASK) ||
2788 (ss_e2 & DESC_CS_MASK) ||
2789 !(ss_e2 & DESC_W_MASK))
2790 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2791 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2792 if (dpl != rpl)
2793 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2794 if (!(ss_e2 & DESC_P_MASK))
2795 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2796 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2797 get_seg_base(ss_e1, ss_e2),
2798 get_seg_limit(ss_e1, ss_e2),
2799 ss_e2);
2800 }
2801
2802 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2803 get_seg_base(e1, e2),
2804 get_seg_limit(e1, e2),
2805 e2);
2806 cpu_x86_set_cpl(env, rpl);
2807 sp = new_esp;
2808#ifdef TARGET_X86_64
2809 if (env->hflags & HF_CS64_MASK)
2810 sp_mask = -1;
2811 else
2812#endif
2813 sp_mask = get_sp_mask(ss_e2);
2814
2815 /* validate data segments */
2816 validate_seg(R_ES, rpl);
2817 validate_seg(R_DS, rpl);
2818 validate_seg(R_FS, rpl);
2819 validate_seg(R_GS, rpl);
2820
2821 sp += addend;
2822 }
2823 SET_ESP(sp, sp_mask);
2824 env->eip = new_eip;
2825 if (is_iret) {
2826 /* NOTE: 'cpl' is the _old_ CPL */
2827 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2828 if (cpl == 0)
2829#ifdef VBOX
2830 eflags_mask |= IOPL_MASK | VIF_MASK | VIP_MASK;
2831#else
2832 eflags_mask |= IOPL_MASK;
2833#endif
2834 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2835 if (cpl <= iopl)
2836 eflags_mask |= IF_MASK;
2837 if (shift == 0)
2838 eflags_mask &= 0xffff;
2839 load_eflags(new_eflags, eflags_mask);
2840 }
2841 return;
2842
2843 return_to_vm86:
2844
2845#if 0 // defined(VBOX) && defined(DEBUG)
2846 printf("V86: new CS %04X\n", new_cs);
2847 printf("V86: Descriptor %08X:%08X\n", e2, e1);
2848 printf("V86: new EIP %08X\n", new_eip);
2849 printf("V86: new EFLAGS %08X\n", new_eflags);
2850#endif
2851
2852 POPL(ssp, sp, sp_mask, new_esp);
2853 POPL(ssp, sp, sp_mask, new_ss);
2854 POPL(ssp, sp, sp_mask, new_es);
2855 POPL(ssp, sp, sp_mask, new_ds);
2856 POPL(ssp, sp, sp_mask, new_fs);
2857 POPL(ssp, sp, sp_mask, new_gs);
2858
2859 /* modify processor state */
2860 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2861 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2862 load_seg_vm(R_CS, new_cs & 0xffff);
2863 cpu_x86_set_cpl(env, 3);
2864 load_seg_vm(R_SS, new_ss & 0xffff);
2865 load_seg_vm(R_ES, new_es & 0xffff);
2866 load_seg_vm(R_DS, new_ds & 0xffff);
2867 load_seg_vm(R_FS, new_fs & 0xffff);
2868 load_seg_vm(R_GS, new_gs & 0xffff);
2869
2870 env->eip = new_eip & 0xffff;
2871 ESP = new_esp;
2872}
2873
2874void helper_iret_protected(int shift, int next_eip)
2875{
2876 int tss_selector, type;
2877 uint32_t e1, e2;
2878
2879#ifdef VBOX
2880 remR3TrapClear(env->pVM);
2881#endif
2882
2883 /* specific case for TSS */
2884 if (env->eflags & NT_MASK) {
2885#ifdef TARGET_X86_64
2886 if (env->hflags & HF_LMA_MASK)
2887 raise_exception_err(EXCP0D_GPF, 0);
2888#endif
2889 tss_selector = lduw_kernel(env->tr.base + 0);
2890 if (tss_selector & 4)
2891 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2892 if (load_segment(&e1, &e2, tss_selector) != 0)
2893 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2894 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2895 /* NOTE: we check both segment and busy TSS */
2896 if (type != 3)
2897 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2898 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2899 } else {
2900 helper_ret_protected(shift, 1, 0);
2901 }
2902#ifdef USE_KQEMU
2903 if (kqemu_is_ok(env)) {
2904 CC_OP = CC_OP_EFLAGS;
2905 env->exception_index = -1;
2906 cpu_loop_exit();
2907 }
2908#endif
2909}
2910
2911void helper_lret_protected(int shift, int addend)
2912{
2913 helper_ret_protected(shift, 0, addend);
2914#ifdef USE_KQEMU
2915 if (kqemu_is_ok(env)) {
2916 env->exception_index = -1;
2917 cpu_loop_exit();
2918 }
2919#endif
2920}
2921
2922void helper_sysenter(void)
2923{
2924 if (env->sysenter_cs == 0) {
2925 raise_exception_err(EXCP0D_GPF, 0);
2926 }
2927 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2928 cpu_x86_set_cpl(env, 0);
2929 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2930 0, 0xffffffff,
2931 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2932 DESC_S_MASK |
2933 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2934 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2935 0, 0xffffffff,
2936 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2937 DESC_S_MASK |
2938 DESC_W_MASK | DESC_A_MASK);
2939 ESP = env->sysenter_esp;
2940 EIP = env->sysenter_eip;
2941}
2942
2943void helper_sysexit(void)
2944{
2945 int cpl;
2946
2947 cpl = env->hflags & HF_CPL_MASK;
2948 if (env->sysenter_cs == 0 || cpl != 0) {
2949 raise_exception_err(EXCP0D_GPF, 0);
2950 }
2951 cpu_x86_set_cpl(env, 3);
2952 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2953 0, 0xffffffff,
2954 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2955 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2956 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2957 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2958 0, 0xffffffff,
2959 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2960 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2961 DESC_W_MASK | DESC_A_MASK);
2962 ESP = ECX;
2963 EIP = EDX;
2964#ifdef USE_KQEMU
2965 if (kqemu_is_ok(env)) {
2966 env->exception_index = -1;
2967 cpu_loop_exit();
2968 }
2969#endif
2970}
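
/* Editor's note: a reading aid (hypothetical helper, not part of this
 * source) for the fixed selector layout that helper_sysenter and
 * helper_sysexit above derive from the SYSENTER_CS MSR. */
#if 0
# include <stdint.h>
static void sysenter_selector_layout(uint16_t sysenter_cs)
{
    uint16_t enter_cs = sysenter_cs & 0xfffc;              /* ring 0 code */
    uint16_t enter_ss = (sysenter_cs + 8) & 0xfffc;        /* ring 0 stack */
    uint16_t exit_cs  = ((sysenter_cs + 16) & 0xfffc) | 3; /* ring 3 code */
    uint16_t exit_ss  = ((sysenter_cs + 24) & 0xfffc) | 3; /* ring 3 stack */
    (void)enter_cs; (void)enter_ss; (void)exit_cs; (void)exit_ss;
}
#endif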
2971
2972void helper_movl_crN_T0(int reg)
2973{
2974#if !defined(CONFIG_USER_ONLY)
2975 switch(reg) {
2976 case 0:
2977 cpu_x86_update_cr0(env, T0);
2978 break;
2979 case 3:
2980 cpu_x86_update_cr3(env, T0);
2981 break;
2982 case 4:
2983 cpu_x86_update_cr4(env, T0);
2984 break;
2985 case 8:
2986 cpu_set_apic_tpr(env, T0);
2987 break;
2988 default:
2989 env->cr[reg] = T0;
2990 break;
2991 }
2992#endif
2993}
2994
2995/* XXX: do more */
2996void helper_movl_drN_T0(int reg)
2997{
2998 env->dr[reg] = T0;
2999}
3000
3001void helper_invlpg(target_ulong addr)
3002{
3003 cpu_x86_flush_tlb(env, addr);
3004}
3005
3006void helper_rdtsc(void)
3007{
3008 uint64_t val;
3009
3010 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3011 raise_exception(EXCP0D_GPF);
3012 }
3013 val = cpu_get_tsc(env);
3014 EAX = (uint32_t)(val);
3015 EDX = (uint32_t)(val >> 32);
3016}
3017
3018#if defined(CONFIG_USER_ONLY)
3019void helper_wrmsr(void)
3020{
3021}
3022
3023void helper_rdmsr(void)
3024{
3025}
3026#else
3027void helper_wrmsr(void)
3028{
3029 uint64_t val;
3030
3031 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3032
3033 switch((uint32_t)ECX) {
3034 case MSR_IA32_SYSENTER_CS:
3035 env->sysenter_cs = val & 0xffff;
3036 break;
3037 case MSR_IA32_SYSENTER_ESP:
3038 env->sysenter_esp = val;
3039 break;
3040 case MSR_IA32_SYSENTER_EIP:
3041 env->sysenter_eip = val;
3042 break;
3043 case MSR_IA32_APICBASE:
3044 cpu_set_apic_base(env, val);
3045 break;
3046 case MSR_EFER:
3047 {
3048 uint64_t update_mask;
3049 update_mask = 0;
3050 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3051 update_mask |= MSR_EFER_SCE;
3052 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3053 update_mask |= MSR_EFER_LME;
3054 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3055 update_mask |= MSR_EFER_FFXSR;
3056 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3057 update_mask |= MSR_EFER_NXE;
3058 env->efer = (env->efer & ~update_mask) |
3059 (val & update_mask);
3060 }
3061 break;
3062 case MSR_STAR:
3063 env->star = val;
3064 break;
3065 case MSR_PAT:
3066 env->pat = val;
3067 break;
3068#ifdef TARGET_X86_64
3069 case MSR_LSTAR:
3070 env->lstar = val;
3071 break;
3072 case MSR_CSTAR:
3073 env->cstar = val;
3074 break;
3075 case MSR_FMASK:
3076 env->fmask = val;
3077 break;
3078 case MSR_FSBASE:
3079 env->segs[R_FS].base = val;
3080 break;
3081 case MSR_GSBASE:
3082 env->segs[R_GS].base = val;
3083 break;
3084 case MSR_KERNELGSBASE:
3085 env->kernelgsbase = val;
3086 break;
3087#endif
3088 default:
3089#ifndef VBOX
3090 /* XXX: exception ? */
3091 break;
3092#else /* VBOX */
3093 {
3094 uint32_t ecx = (uint32_t)ECX;
3095 /* In X2APIC specification this range is reserved for APIC control. */
3096 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3097 cpu_apic_wrmsr(env, ecx, val);
3098 /** @todo else exception? */
3099 break;
3100 }
3101#endif /* VBOX */
3102 }
3103}
3104
3105void helper_rdmsr(void)
3106{
3107 uint64_t val;
3108 switch((uint32_t)ECX) {
3109 case MSR_IA32_SYSENTER_CS:
3110 val = env->sysenter_cs;
3111 break;
3112 case MSR_IA32_SYSENTER_ESP:
3113 val = env->sysenter_esp;
3114 break;
3115 case MSR_IA32_SYSENTER_EIP:
3116 val = env->sysenter_eip;
3117 break;
3118 case MSR_IA32_APICBASE:
3119 val = cpu_get_apic_base(env);
3120 break;
3121 case MSR_EFER:
3122 val = env->efer;
3123 break;
3124 case MSR_STAR:
3125 val = env->star;
3126 break;
3127 case MSR_PAT:
3128 val = env->pat;
3129 break;
3130#ifdef TARGET_X86_64
3131 case MSR_LSTAR:
3132 val = env->lstar;
3133 break;
3134 case MSR_CSTAR:
3135 val = env->cstar;
3136 break;
3137 case MSR_FMASK:
3138 val = env->fmask;
3139 break;
3140 case MSR_FSBASE:
3141 val = env->segs[R_FS].base;
3142 break;
3143 case MSR_GSBASE:
3144 val = env->segs[R_GS].base;
3145 break;
3146 case MSR_KERNELGSBASE:
3147 val = env->kernelgsbase;
3148 break;
3149#endif
3150 default:
3151#ifndef VBOX
3152 /* XXX: exception ? */
3153 val = 0;
3154 break;
3155#else /* VBOX */
3156 {
3157 uint32_t ecx = (uint32_t)ECX;
3158 /* In X2APIC specification this range is reserved for APIC control. */
3159 if (ecx >= MSR_APIC_RANGE_START && ecx < MSR_APIC_RANGE_END)
3160 val = cpu_apic_rdmsr(env, ecx);
3161 else
3162 val = 0; /** @todo else exception? */
3163 break;
3164 }
3165#endif /* VBOX */
3166 }
3167 EAX = (uint32_t)(val);
3168 EDX = (uint32_t)(val >> 32);
3169}
3170#endif
3171
3172void helper_lsl(void)
3173{
3174 unsigned int selector, limit;
3175 uint32_t e1, e2, eflags;
3176 int rpl, dpl, cpl, type;
3177
3178 eflags = cc_table[CC_OP].compute_all();
3179 selector = T0 & 0xffff;
3180 if (load_segment(&e1, &e2, selector) != 0)
3181 goto fail;
3182 rpl = selector & 3;
3183 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3184 cpl = env->hflags & HF_CPL_MASK;
3185 if (e2 & DESC_S_MASK) {
3186 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3187 /* conforming */
3188 } else {
3189 if (dpl < cpl || dpl < rpl)
3190 goto fail;
3191 }
3192 } else {
3193 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3194 switch(type) {
3195 case 1:
3196 case 2:
3197 case 3:
3198 case 9:
3199 case 11:
3200 break;
3201 default:
3202 goto fail;
3203 }
3204 if (dpl < cpl || dpl < rpl) {
3205 fail:
3206 CC_SRC = eflags & ~CC_Z;
3207 return;
3208 }
3209 }
3210 limit = get_seg_limit(e1, e2);
3211 T1 = limit;
3212 CC_SRC = eflags | CC_Z;
3213}
3214
3215void helper_lar(void)
3216{
3217 unsigned int selector;
3218 uint32_t e1, e2, eflags;
3219 int rpl, dpl, cpl, type;
3220
3221 eflags = cc_table[CC_OP].compute_all();
3222 selector = T0 & 0xffff;
3223 if ((selector & 0xfffc) == 0)
3224 goto fail;
3225 if (load_segment(&e1, &e2, selector) != 0)
3226 goto fail;
3227 rpl = selector & 3;
3228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3229 cpl = env->hflags & HF_CPL_MASK;
3230 if (e2 & DESC_S_MASK) {
3231 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3232 /* conforming */
3233 } else {
3234 if (dpl < cpl || dpl < rpl)
3235 goto fail;
3236 }
3237 } else {
3238 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3239 switch(type) {
3240 case 1:
3241 case 2:
3242 case 3:
3243 case 4:
3244 case 5:
3245 case 9:
3246 case 11:
3247 case 12:
3248 break;
3249 default:
3250 goto fail;
3251 }
3252 if (dpl < cpl || dpl < rpl) {
3253 fail:
3254 CC_SRC = eflags & ~CC_Z;
3255 return;
3256 }
3257 }
3258 T1 = e2 & 0x00f0ff00;
3259 CC_SRC = eflags | CC_Z;
3260}
3261
3262void helper_verr(void)
3263{
3264 unsigned int selector;
3265 uint32_t e1, e2, eflags;
3266 int rpl, dpl, cpl;
3267
3268 eflags = cc_table[CC_OP].compute_all();
3269 selector = T0 & 0xffff;
3270 if ((selector & 0xfffc) == 0)
3271 goto fail;
3272 if (load_segment(&e1, &e2, selector) != 0)
3273 goto fail;
3274 if (!(e2 & DESC_S_MASK))
3275 goto fail;
3276 rpl = selector & 3;
3277 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3278 cpl = env->hflags & HF_CPL_MASK;
3279 if (e2 & DESC_CS_MASK) {
3280 if (!(e2 & DESC_R_MASK))
3281 goto fail;
3282 if (!(e2 & DESC_C_MASK)) {
3283 if (dpl < cpl || dpl < rpl)
3284 goto fail;
3285 }
3286 } else {
3287 if (dpl < cpl || dpl < rpl) {
3288 fail:
3289 CC_SRC = eflags & ~CC_Z;
3290 return;
3291 }
3292 }
3293 CC_SRC = eflags | CC_Z;
3294}
3295
3296void helper_verw(void)
3297{
3298 unsigned int selector;
3299 uint32_t e1, e2, eflags;
3300 int rpl, dpl, cpl;
3301
3302 eflags = cc_table[CC_OP].compute_all();
3303 selector = T0 & 0xffff;
3304 if ((selector & 0xfffc) == 0)
3305 goto fail;
3306 if (load_segment(&e1, &e2, selector) != 0)
3307 goto fail;
3308 if (!(e2 & DESC_S_MASK))
3309 goto fail;
3310 rpl = selector & 3;
3311 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3312 cpl = env->hflags & HF_CPL_MASK;
3313 if (e2 & DESC_CS_MASK) {
3314 goto fail;
3315 } else {
3316 if (dpl < cpl || dpl < rpl)
3317 goto fail;
3318 if (!(e2 & DESC_W_MASK)) {
3319 fail:
3320 CC_SRC = eflags & ~CC_Z;
3321 return;
3322 }
3323 }
3324 CC_SRC = eflags | CC_Z;
3325}
3326
3327/* FPU helpers */
3328
3329void helper_fldt_ST0_A0(void)
3330{
3331 int new_fpstt;
3332 new_fpstt = (env->fpstt - 1) & 7;
3333 env->fpregs[new_fpstt].d = helper_fldt(A0);
3334 env->fpstt = new_fpstt;
3335 env->fptags[new_fpstt] = 0; /* validate stack entry */
3336}
3337
3338void helper_fstt_ST0_A0(void)
3339{
3340 helper_fstt(ST0, A0);
3341}
3342
3343void fpu_set_exception(int mask)
3344{
3345 env->fpus |= mask;
3346 if (env->fpus & (~env->fpuc & FPUC_EM))
3347 env->fpus |= FPUS_SE | FPUS_B;
3348}
3349
3350CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3351{
3352 if (b == 0.0)
3353 fpu_set_exception(FPUS_ZE);
3354 return a / b;
3355}
3356
3357void fpu_raise_exception(void)
3358{
3359 if (env->cr[0] & CR0_NE_MASK) {
3360 raise_exception(EXCP10_COPR);
3361 }
3362#if !defined(CONFIG_USER_ONLY)
3363 else {
3364 cpu_set_ferr(env);
3365 }
3366#endif
3367}
3368
3369/* BCD ops */
3370
3371void helper_fbld_ST0_A0(void)
3372{
3373 CPU86_LDouble tmp;
3374 uint64_t val;
3375 unsigned int v;
3376 int i;
3377
3378 val = 0;
3379 for(i = 8; i >= 0; i--) {
3380 v = ldub(A0 + i);
3381 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3382 }
3383 tmp = val;
3384 if (ldub(A0 + 9) & 0x80)
3385 tmp = -tmp;
3386 fpush();
3387 ST0 = tmp;
3388}
3389
3390void helper_fbst_ST0_A0(void)
3391{
3392 int v;
3393 target_ulong mem_ref, mem_end;
3394 int64_t val;
3395
3396 val = floatx_to_int64(ST0, &env->fp_status);
3397 mem_ref = A0;
3398 mem_end = mem_ref + 9;
3399 if (val < 0) {
3400 stb(mem_end, 0x80);
3401 val = -val;
3402 } else {
3403 stb(mem_end, 0x00);
3404 }
3405 while (mem_ref < mem_end) {
3406 if (val == 0)
3407 break;
3408 v = val % 100;
3409 val = val / 100;
3410 v = ((v / 10) << 4) | (v % 10);
3411 stb(mem_ref++, v);
3412 }
3413 while (mem_ref < mem_end) {
3414 stb(mem_ref++, 0);
3415 }
3416}
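
/* Editor's note: a hypothetical check (not part of this source) of the
 * 10-byte packed-BCD layout the two helpers above read and write: 18
 * digits, two per byte, least significant pair first, sign in bit 7 of
 * byte 9. E.g. -1234 is stored as 34 12 00 00 00 00 00 00 00 80. */
#if 0
# include <stdint.h>
# include <assert.h>
static void fbstp_layout_example(void)
{
    uint8_t b[10] = { 0x34, 0x12, 0, 0, 0, 0, 0, 0, 0, 0x80 };
    int64_t val = 0;
    int i;
    for (i = 8; i >= 0; i--)
        val = val * 100 + (b[i] >> 4) * 10 + (b[i] & 0xf);
    if (b[9] & 0x80)
        val = -val;
    assert(val == -1234);
}
#endif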
3417
3418void helper_f2xm1(void)
3419{
3420 ST0 = pow(2.0,ST0) - 1.0;
3421}
3422
3423void helper_fyl2x(void)
3424{
3425 CPU86_LDouble fptemp;
3426
3427 fptemp = ST0;
3428 if (fptemp>0.0){
3429 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3430 ST1 *= fptemp;
3431 fpop();
3432 } else {
3433 env->fpus &= (~0x4700);
3434 env->fpus |= 0x400;
3435 }
3436}
3437
3438void helper_fptan(void)
3439{
3440 CPU86_LDouble fptemp;
3441
3442 fptemp = ST0;
3443 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3444 env->fpus |= 0x400;
3445 } else {
3446 ST0 = tan(fptemp);
3447 fpush();
3448 ST0 = 1.0;
3449 env->fpus &= (~0x400); /* C2 <-- 0 */
3450 /* the above code is for |arg| < 2**52 only */
3451 }
3452}
3453
3454void helper_fpatan(void)
3455{
3456 CPU86_LDouble fptemp, fpsrcop;
3457
3458 fpsrcop = ST1;
3459 fptemp = ST0;
3460 ST1 = atan2(fpsrcop,fptemp);
3461 fpop();
3462}
3463
3464void helper_fxtract(void)
3465{
3466 CPU86_LDoubleU temp;
3467 unsigned int expdif;
3468
3469 temp.d = ST0;
3470 expdif = EXPD(temp) - EXPBIAS;
3471 /* DP exponent bias */
3472 ST0 = expdif;
3473 fpush();
3474 BIASEXPONENT(temp);
3475 ST0 = temp.d;
3476}
3477
3478void helper_fprem1(void)
3479{
3480 CPU86_LDouble dblq, fpsrcop, fptemp;
3481 CPU86_LDoubleU fpsrcop1, fptemp1;
3482 int expdif;
3483 int q;
3484
3485 fpsrcop = ST0;
3486 fptemp = ST1;
3487 fpsrcop1.d = fpsrcop;
3488 fptemp1.d = fptemp;
3489 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3490 if (expdif < 53) {
3491 dblq = fpsrcop / fptemp;
3492 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3493 ST0 = fpsrcop - fptemp*dblq;
3494 q = (int)dblq; /* cutting off top bits is assumed here */
3495 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3496 /* (C0,C1,C3) <-- (q2,q1,q0) */
3497 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3498 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3499 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3500 } else {
3501 env->fpus |= 0x400; /* C2 <-- 1 */
3502 fptemp = pow(2.0, expdif-50);
3503 fpsrcop = (ST0 / ST1) / fptemp;
3504 /* fpsrcop = integer obtained by rounding to the nearest */
3505 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
3506 floor(fpsrcop): ceil(fpsrcop);
3507 ST0 -= (ST1 * fpsrcop * fptemp);
3508 }
3509}
3510
3511void helper_fprem(void)
3512{
3513 CPU86_LDouble dblq, fpsrcop, fptemp;
3514 CPU86_LDoubleU fpsrcop1, fptemp1;
3515 int expdif;
3516 int q;
3517
3518 fpsrcop = ST0;
3519 fptemp = ST1;
3520 fpsrcop1.d = fpsrcop;
3521 fptemp1.d = fptemp;
3522 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3523 if ( expdif < 53 ) {
3524 dblq = fpsrcop / fptemp;
3525 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
3526 ST0 = fpsrcop - fptemp*dblq;
3527 q = (int)dblq; /* cutting off top bits is assumed here */
3528 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3529 /* (C0,C1,C3) <-- (q2,q1,q0) */
3530 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
3531 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
3532 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
3533 } else {
3534 env->fpus |= 0x400; /* C2 <-- 1 */
3535 fptemp = pow(2.0, expdif-50);
3536 fpsrcop = (ST0 / ST1) / fptemp;
3537 /* fpsrcop = integer obtained by chopping */
3538 fpsrcop = (fpsrcop < 0.0)?
3539 -(floor(fabs(fpsrcop))): floor(fpsrcop);
3540 ST0 -= (ST1 * fpsrcop * fptemp);
3541 }
3542}
3543
3544void helper_fyl2xp1(void)
3545{
3546 CPU86_LDouble fptemp;
3547
3548 fptemp = ST0;
3549 if ((fptemp+1.0)>0.0) {
3550 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3551 ST1 *= fptemp;
3552 fpop();
3553 } else {
3554 env->fpus &= (~0x4700);
3555 env->fpus |= 0x400;
3556 }
3557}
3558
3559void helper_fsqrt(void)
3560{
3561 CPU86_LDouble fptemp;
3562
3563 fptemp = ST0;
3564 if (fptemp<0.0) {
3565 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3566 env->fpus |= 0x400;
3567 }
3568 ST0 = sqrt(fptemp);
3569}
3570
3571void helper_fsincos(void)
3572{
3573 CPU86_LDouble fptemp;
3574
3575 fptemp = ST0;
3576 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3577 env->fpus |= 0x400;
3578 } else {
3579 ST0 = sin(fptemp);
3580 fpush();
3581 ST0 = cos(fptemp);
3582 env->fpus &= (~0x400); /* C2 <-- 0 */
3583 /* the above code is for |arg| < 2**63 only */
3584 }
3585}
3586
3587void helper_frndint(void)
3588{
3589 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3590}
3591
3592void helper_fscale(void)
3593{
3594 ST0 = ldexp (ST0, (int)(ST1));
3595}
3596
3597void helper_fsin(void)
3598{
3599 CPU86_LDouble fptemp;
3600
3601 fptemp = ST0;
3602 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3603 env->fpus |= 0x400;
3604 } else {
3605 ST0 = sin(fptemp);
3606 env->fpus &= (~0x400); /* C2 <-- 0 */
3607 /* the above code is for |arg| < 2**53 only */
3608 }
3609}
3610
3611void helper_fcos(void)
3612{
3613 CPU86_LDouble fptemp;
3614
3615 fptemp = ST0;
3616 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3617 env->fpus |= 0x400;
3618 } else {
3619 ST0 = cos(fptemp);
3620 env->fpus &= (~0x400); /* C2 <-- 0 */
3621 /* the above code is for |arg| < 2**63 only */
3622 }
3623}
3624
3625void helper_fxam_ST0(void)
3626{
3627 CPU86_LDoubleU temp;
3628 int expdif;
3629
3630 temp.d = ST0;
3631
3632 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3633 if (SIGND(temp))
3634 env->fpus |= 0x200; /* C1 <-- 1 */
3635
3636 /* XXX: test fptags too */
3637 expdif = EXPD(temp);
3638 if (expdif == MAXEXPD) {
3639#ifdef USE_X86LDOUBLE
3640 if (MANTD(temp) == 0x8000000000000000ULL)
3641#else
3642 if (MANTD(temp) == 0)
3643#endif
3644 env->fpus |= 0x500 /*Infinity*/;
3645 else
3646 env->fpus |= 0x100 /*NaN*/;
3647 } else if (expdif == 0) {
3648 if (MANTD(temp) == 0)
3649 env->fpus |= 0x4000 /*Zero*/;
3650 else
3651 env->fpus |= 0x4400 /*Denormal*/;
3652 } else {
3653 env->fpus |= 0x400;
3654 }
3655}
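
/* Editor's note (reading aid, not from the original source): with
 * C0=0x100, C1=0x200, C2=0x400 and C3=0x4000, the classes set above
 * encode in (C3,C2,C0) as:
 *   0 1 0  normal    -> fpus |= 0x0400
 *   0 0 1  NaN       -> fpus |= 0x0100
 *   0 1 1  infinity  -> fpus |= 0x0500
 *   1 0 0  zero      -> fpus |= 0x4000
 *   1 1 0  denormal  -> fpus |= 0x4400
 * C1 (0x200) carries the sign bit. */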
3656
3657void helper_fstenv(target_ulong ptr, int data32)
3658{
3659 int fpus, fptag, exp, i;
3660 uint64_t mant;
3661 CPU86_LDoubleU tmp;
3662
3663 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3664 fptag = 0;
3665 for (i=7; i>=0; i--) {
3666 fptag <<= 2;
3667 if (env->fptags[i]) {
3668 fptag |= 3;
3669 } else {
3670 tmp.d = env->fpregs[i].d;
3671 exp = EXPD(tmp);
3672 mant = MANTD(tmp);
3673 if (exp == 0 && mant == 0) {
3674 /* zero */
3675 fptag |= 1;
3676 } else if (exp == 0 || exp == MAXEXPD
3677#ifdef USE_X86LDOUBLE
3678 || (mant & (1LL << 63)) == 0
3679#endif
3680 ) {
3681 /* NaNs, infinity, denormal */
3682 fptag |= 2;
3683 }
3684 }
3685 }
3686 if (data32) {
3687 /* 32 bit */
3688 stl(ptr, env->fpuc);
3689 stl(ptr + 4, fpus);
3690 stl(ptr + 8, fptag);
3691 stl(ptr + 12, 0); /* fpip */
3692 stl(ptr + 16, 0); /* fpcs */
3693 stl(ptr + 20, 0); /* fpoo */
3694 stl(ptr + 24, 0); /* fpos */
3695 } else {
3696 /* 16 bit */
3697 stw(ptr, env->fpuc);
3698 stw(ptr + 2, fpus);
3699 stw(ptr + 4, fptag);
3700 stw(ptr + 6, 0);
3701 stw(ptr + 8, 0);
3702 stw(ptr + 10, 0);
3703 stw(ptr + 12, 0);
3704 }
3705}
3706
3707void helper_fldenv(target_ulong ptr, int data32)
3708{
3709 int i, fpus, fptag;
3710
3711 if (data32) {
3712 env->fpuc = lduw(ptr);
3713 fpus = lduw(ptr + 4);
3714 fptag = lduw(ptr + 8);
3715 }
3716 else {
3717 env->fpuc = lduw(ptr);
3718 fpus = lduw(ptr + 2);
3719 fptag = lduw(ptr + 4);
3720 }
3721 env->fpstt = (fpus >> 11) & 7;
3722 env->fpus = fpus & ~0x3800;
3723 for(i = 0;i < 8; i++) {
3724 env->fptags[i] = ((fptag & 3) == 3);
3725 fptag >>= 2;
3726 }
3727}
3728
3729void helper_fsave(target_ulong ptr, int data32)
3730{
3731 CPU86_LDouble tmp;
3732 int i;
3733
3734 helper_fstenv(ptr, data32);
3735
3736 ptr += (14 << data32);
3737 for(i = 0;i < 8; i++) {
3738 tmp = ST(i);
3739 helper_fstt(tmp, ptr);
3740 ptr += 10;
3741 }
3742
3743 /* fninit */
3744 env->fpus = 0;
3745 env->fpstt = 0;
3746 env->fpuc = 0x37f;
3747 env->fptags[0] = 1;
3748 env->fptags[1] = 1;
3749 env->fptags[2] = 1;
3750 env->fptags[3] = 1;
3751 env->fptags[4] = 1;
3752 env->fptags[5] = 1;
3753 env->fptags[6] = 1;
3754 env->fptags[7] = 1;
3755}
3756
3757void helper_frstor(target_ulong ptr, int data32)
3758{
3759 CPU86_LDouble tmp;
3760 int i;
3761
3762 helper_fldenv(ptr, data32);
3763 ptr += (14 << data32);
3764
3765 for(i = 0;i < 8; i++) {
3766 tmp = helper_fldt(ptr);
3767 ST(i) = tmp;
3768 ptr += 10;
3769 }
3770}
3771
3772void helper_fxsave(target_ulong ptr, int data64)
3773{
3774 int fpus, fptag, i, nb_xmm_regs;
3775 CPU86_LDouble tmp;
3776 target_ulong addr;
3777
3778 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3779 fptag = 0;
3780 for(i = 0; i < 8; i++) {
3781 fptag |= (env->fptags[i] << i);
3782 }
3783 stw(ptr, env->fpuc);
3784 stw(ptr + 2, fpus);
3785 stw(ptr + 4, fptag ^ 0xff);
3786
3787 addr = ptr + 0x20;
3788 for(i = 0;i < 8; i++) {
3789 tmp = ST(i);
3790 helper_fstt(tmp, addr);
3791 addr += 16;
3792 }
3793
3794 if (env->cr[4] & CR4_OSFXSR_MASK) {
3795 /* XXX: finish it */
3796 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3797 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3798 nb_xmm_regs = 8 << data64;
3799 addr = ptr + 0xa0;
3800 for(i = 0; i < nb_xmm_regs; i++) {
3801 stq(addr, env->xmm_regs[i].XMM_Q(0));
3802 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3803 addr += 16;
3804 }
3805 }
3806}
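
/* Editor's note: the FXSAVE-area offsets implied by the stores above,
 * collected as a reading aid (hypothetical names, not used anywhere). */
#if 0
# define FXSAVE_OFF_FCW         0x00  /* control word */
# define FXSAVE_OFF_FSW         0x02  /* status word with TOP merged in */
# define FXSAVE_OFF_FTW         0x04  /* inverted tag bitmap, 1 = valid */
# define FXSAVE_OFF_MXCSR       0x18
# define FXSAVE_OFF_MXCSR_MASK  0x1c
# define FXSAVE_OFF_ST0         0x20  /* 8 x87 registers, 16-byte stride */
# define FXSAVE_OFF_XMM0        0xa0  /* 8 (16 when data64) XMM registers */
#endif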
3807
3808void helper_fxrstor(target_ulong ptr, int data64)
3809{
3810 int i, fpus, fptag, nb_xmm_regs;
3811 CPU86_LDouble tmp;
3812 target_ulong addr;
3813
3814 env->fpuc = lduw(ptr);
3815 fpus = lduw(ptr + 2);
3816 fptag = lduw(ptr + 4);
3817 env->fpstt = (fpus >> 11) & 7;
3818 env->fpus = fpus & ~0x3800;
3819 fptag ^= 0xff;
3820 for(i = 0;i < 8; i++) {
3821 env->fptags[i] = ((fptag >> i) & 1);
3822 }
3823
3824 addr = ptr + 0x20;
3825 for(i = 0;i < 8; i++) {
3826 tmp = helper_fldt(addr);
3827 ST(i) = tmp;
3828 addr += 16;
3829 }
3830
3831 if (env->cr[4] & CR4_OSFXSR_MASK) {
3832 /* XXX: finish it */
3833 env->mxcsr = ldl(ptr + 0x18);
3834 //ldl(ptr + 0x1c);
3835 nb_xmm_regs = 8 << data64;
3836 addr = ptr + 0xa0;
3837 for(i = 0; i < nb_xmm_regs; i++) {
3838#if !defined(VBOX) || __GNUC__ < 4
3839 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3840 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3841#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
3842# if 1
3843 env->xmm_regs[i].XMM_L(0) = ldl(addr);
3844 env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
3845 env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
3846 env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
3847# else
3848 /* this works fine on Mac OS X, gcc 4.0.1 */
3849 uint64_t u64 = ldq(addr);
3850 env->xmm_regs[i].XMM_Q(0) = u64;
3851 u64 = ldq(addr + 8);
3852 env->xmm_regs[i].XMM_Q(1) = u64;
3853# endif
3854#endif
3855 addr += 16;
3856 }
3857 }
3858}
3859
3860#ifndef USE_X86LDOUBLE
3861
3862void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3863{
3864 CPU86_LDoubleU temp;
3865 int e;
3866
3867 temp.d = f;
3868 /* mantissa */
3869 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3870 /* exponent + sign */
3871 e = EXPD(temp) - EXPBIAS + 16383;
3872 e |= SIGND(temp) >> 16;
3873 *pexp = e;
3874}
3875
3876CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3877{
3878 CPU86_LDoubleU temp;
3879 int e;
3880 uint64_t ll;
3881
3882 /* XXX: handle overflow ? */
3883 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3884 e |= (upper >> 4) & 0x800; /* sign */
3885 ll = (mant >> 11) & ((1LL << 52) - 1);
3886#ifdef __arm__
3887 temp.l.upper = (e << 20) | (ll >> 32);
3888 temp.l.lower = ll;
3889#else
3890 temp.ll = ll | ((uint64_t)e << 52);
3891#endif
3892 return temp.d;
3893}
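
/* Editor's note: a hypothetical round-trip check (not part of this
 * source) of the two conversions above; exact whenever the 80-bit value
 * is representable as a double. For 1.0 the mantissa keeps only the
 * explicit integer bit and the exponent is rebased to 16383. */
#if 0
# include <stdint.h>
# include <assert.h>
static void fp80_roundtrip_example(void)
{
    uint64_t mant;
    uint16_t exp;
    cpu_get_fp80(&mant, &exp, 1.0);
    assert(mant == 0x8000000000000000ULL);  /* explicit integer bit */
    assert(exp == 16383);                   /* extended-precision bias */
    assert(cpu_set_fp80(mant, exp) == 1.0);
}
#endif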
3894
3895#else
3896
3897void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3898{
3899 CPU86_LDoubleU temp;
3900
3901 temp.d = f;
3902 *pmant = temp.l.lower;
3903 *pexp = temp.l.upper;
3904}
3905
3906CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3907{
3908 CPU86_LDoubleU temp;
3909
3910 temp.l.upper = upper;
3911 temp.l.lower = mant;
3912 return temp.d;
3913}
3914#endif
3915
3916#ifdef TARGET_X86_64
3917
3918//#define DEBUG_MULDIV
3919
3920static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3921{
3922 *plow += a;
3923 /* carry test */
3924 if (*plow < a)
3925 (*phigh)++;
3926 *phigh += b;
3927}
3928
3929static void neg128(uint64_t *plow, uint64_t *phigh)
3930{
3931 *plow = ~ *plow;
3932 *phigh = ~ *phigh;
3933 add128(plow, phigh, 1, 0);
3934}
3935
3936static void mul64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3937{
3938 uint32_t a0, a1, b0, b1;
3939 uint64_t v;
3940
3941 a0 = a;
3942 a1 = a >> 32;
3943
3944 b0 = b;
3945 b1 = b >> 32;
3946
3947 v = (uint64_t)a0 * (uint64_t)b0;
3948 *plow = v;
3949 *phigh = 0;
3950
3951 v = (uint64_t)a0 * (uint64_t)b1;
3952 add128(plow, phigh, v << 32, v >> 32);
3953
3954 v = (uint64_t)a1 * (uint64_t)b0;
3955 add128(plow, phigh, v << 32, v >> 32);
3956
3957 v = (uint64_t)a1 * (uint64_t)b1;
3958 *phigh += v;
3959#ifdef DEBUG_MULDIV
3960 printf("mul: 0x%016" PRIx64 " * 0x%016" PRIx64 " = 0x%016" PRIx64 "%016" PRIx64 "\n",
3961 a, b, *phigh, *plow);
3962#endif
3963}
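
/* Editor's note: a hypothetical sanity check (not part of this source)
 * of the partial-product scheme above:
 *   a*b = a1*b1*2^64 + (a1*b0 + a0*b1)*2^32 + a0*b0. */
#if 0
# include <assert.h>
static void mul64_example(void)
{
    uint64_t lo, hi;
    mul64(&lo, &hi, 0xffffffffffffffffULL, 0xffffffffffffffffULL);
    /* (2^64-1)^2 = 2^128 - 2^65 + 1 */
    assert(hi == 0xfffffffffffffffeULL && lo == 1);
}
#endif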
3964
3965static void imul64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
3966{
3967 int sa, sb;
3968 sa = (a < 0);
3969 if (sa)
3970 a = -a;
3971 sb = (b < 0);
3972 if (sb)
3973 b = -b;
3974 mul64(plow, phigh, a, b);
3975 if (sa ^ sb) {
3976 neg128(plow, phigh);
3977 }
3978}
3979
3980/* return TRUE if overflow */
3981static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3982{
3983 uint64_t q, r, a1, a0;
3984 int i, qb, ab;
3985
3986 a0 = *plow;
3987 a1 = *phigh;
3988 if (a1 == 0) {
3989 q = a0 / b;
3990 r = a0 % b;
3991 *plow = q;
3992 *phigh = r;
3993 } else {
3994 if (a1 >= b)
3995 return 1;
3996 /* XXX: use a better algorithm */
3997 for(i = 0; i < 64; i++) {
3998 ab = a1 >> 63;
3999 a1 = (a1 << 1) | (a0 >> 63);
4000 if (ab || a1 >= b) {
4001 a1 -= b;
4002 qb = 1;
4003 } else {
4004 qb = 0;
4005 }
4006 a0 = (a0 << 1) | qb;
4007 }
4008#if defined(DEBUG_MULDIV)
4009 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4010 *phigh, *plow, b, a0, a1);
4011#endif
4012 *plow = a0;
4013 *phigh = a1;
4014 }
4015 return 0;
4016}
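
/* Editor's note: a hypothetical usage sketch (not part of this source)
 * of the restoring shift-subtract loop above: 2^64 (hi=1, lo=0)
 * divided by 3 gives quotient 0x5555555555555555, remainder 1. */
#if 0
# include <assert.h>
static void div64_example(void)
{
    uint64_t lo = 0, hi = 1;                 /* dividend: 2^64 */
    assert(div64(&lo, &hi, 3) == 0);         /* no overflow */
    assert(lo == 0x5555555555555555ULL);     /* quotient */
    assert(hi == 1);                         /* remainder */
}
#endif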
4017
4018/* return TRUE if overflow */
4019static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4020{
4021 int sa, sb;
4022 sa = ((int64_t)*phigh < 0);
4023 if (sa)
4024 neg128(plow, phigh);
4025 sb = (b < 0);
4026 if (sb)
4027 b = -b;
4028 if (div64(plow, phigh, b) != 0)
4029 return 1;
4030 if (sa ^ sb) {
4031 if (*plow > (1ULL << 63))
4032 return 1;
4033 *plow = - *plow;
4034 } else {
4035 if (*plow >= (1ULL << 63))
4036 return 1;
4037 }
4038 if (sa)
4039 *phigh = - *phigh;
4040 return 0;
4041}
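
/* Editor's note: hypothetical checks (not part of this source) for the
 * asymmetric overflow bounds tested above: a negative quotient may reach
 * -2^63, so *plow == (1ULL << 63) is accepted after negation, while a
 * positive quotient must stay strictly below 2^63. */
#if 0
# include <assert.h>
static void idiv64_bounds_sketch(void)
{
    uint64_t lo, hi;
    lo = (uint64_t)1 << 63; hi = 0;          /* +2^63 / -1 */
    assert(idiv64(&lo, &hi, -1) == 0);       /* -2^63 still fits */
    assert(lo == (uint64_t)1 << 63);
    lo = (uint64_t)1 << 63; hi = 0;          /* +2^63 / +1 */
    assert(idiv64(&lo, &hi, 1) != 0);        /* +2^63 overflows */
}
#endif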
4042
4043void helper_mulq_EAX_T0(void)
4044{
4045 uint64_t r0, r1;
4046
4047 mul64(&r0, &r1, EAX, T0);
4048 EAX = r0;
4049 EDX = r1;
4050 CC_DST = r0;
4051 CC_SRC = r1;
4052}
4053
4054void helper_imulq_EAX_T0(void)
4055{
4056 uint64_t r0, r1;
4057
4058 imul64(&r0, &r1, EAX, T0);
4059 EAX = r0;
4060 EDX = r1;
4061 CC_DST = r0;
4062 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4063}
4064
4065void helper_imulq_T0_T1(void)
4066{
4067 uint64_t r0, r1;
4068
4069 imul64(&r0, &r1, T0, T1);
4070 T0 = r0;
4071 CC_DST = r0;
4072 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4073}
4074
4075void helper_divq_EAX_T0(void)
4076{
4077 uint64_t r0, r1;
4078 if (T0 == 0) {
4079 raise_exception(EXCP00_DIVZ);
4080 }
4081 r0 = EAX;
4082 r1 = EDX;
4083 if (div64(&r0, &r1, T0))
4084 raise_exception(EXCP00_DIVZ);
4085 EAX = r0;
4086 EDX = r1;
4087}
4088
4089void helper_idivq_EAX_T0(void)
4090{
4091 uint64_t r0, r1;
4092 if (T0 == 0) {
4093 raise_exception(EXCP00_DIVZ);
4094 }
4095 r0 = EAX;
4096 r1 = EDX;
4097 if (idiv64(&r0, &r1, T0))
4098 raise_exception(EXCP00_DIVZ);
4099 EAX = r0;
4100 EDX = r1;
4101}
4102
4103void helper_bswapq_T0(void)
4104{
4105 T0 = bswap64(T0);
4106}
4107#endif
4108
4109void helper_hlt(void)
4110{
4111 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4112 env->hflags |= HF_HALTED_MASK;
4113 env->exception_index = EXCP_HLT;
4114 cpu_loop_exit();
4115}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
#ifdef VBOX
    helper_hlt();
#else
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
#endif
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
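/* Worked example of the FPU control word decoding above: the power-up
 * value fpuc = 0x037F has RC (bits 10-11) = 00, selecting
 * round-to-nearest-even, and PC (bits 8-9) = 11, selecting full 80-bit
 * extended precision.  A value like 0x0F7F (RC = 11) would switch the
 * rounding mode to truncation (RC_CHOP).
 */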

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, it means that the function was called from C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

#ifdef VBOX

/**
 * Correctly computes the eflags.
 * @returns eflags.
 * @param env1 CPU environment.
 */
uint32_t raw_compute_eflags(CPUX86State *env1)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t efl = compute_eflags();
    env = savedenv;
    return efl;
}
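/* All of the VBox glue helpers below follow the same save/switch/restore
 * pattern around the global 'env'.  A minimal sketch of how the pattern
 * could be factored out, assuming nothing beyond what these wrappers
 * already do (the macro name WITH_ENV is hypothetical, not part of the
 * code base):
 *
 *   #define WITH_ENV(env1, stmt) \
 *       do { \
 *           CPUX86State *savedenv_ = env; \
 *           env = (env1); \
 *           stmt; \
 *           env = savedenv_; \
 *       } while (0)
 *
 *   // usage: WITH_ENV(env1, u8 = ldub_kernel(addr));
 */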

/**
 * Reads a byte from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @returns the byte read.
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 */
uint8_t read_byte(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint8_t u8 = ldub_kernel(addr);
    env = savedenv;
    return u8;
}

/**
 * Reads a word from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @returns the word read.
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 */
uint16_t read_word(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint16_t u16 = lduw_kernel(addr);
    env = savedenv;
    return u16;
}

/**
 * Reads a dword from a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @returns the dword read.
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 */
uint32_t read_dword(CPUX86State *env1, target_ulong addr)
{
    CPUX86State *savedenv = env;
    env = env1;
    uint32_t u32 = ldl_kernel(addr);
    env = savedenv;
    return u32;
}

/**
 * Writes a byte to a virtual address in the guest memory area.
 * XXX: does this work for all addresses? swapped out pages?
 * @param env1 CPU environment.
 * @param addr GC Virtual address.
 * @param val byte value
 */
void write_byte(CPUX86State *env1, target_ulong addr, uint8_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stb(addr, val);
    env = savedenv;
}

void write_word(CPUX86State *env1, target_ulong addr, uint16_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stw(addr, val);
    env = savedenv;
}

void write_dword(CPUX86State *env1, target_ulong addr, uint32_t val)
{
    CPUX86State *savedenv = env;
    env = env1;
    stl(addr, val);
    env = savedenv;
}

/**
 * Correctly loads a selector into a segment register, updating the
 * internal qemu data structures and caches.
 * @param env1 CPU environment.
 * @param seg_reg Segment register.
 * @param selector Selector to load.
 */
void sync_seg(CPUX86State *env1, int seg_reg, int selector)
{
    CPUX86State *savedenv = env;
    env = env1;

    if ( env->eflags & X86_EFL_VM
        || !(env->cr[0] & X86_CR0_PE))
    {
        load_seg_vm(seg_reg, selector);

        env = savedenv;

        /* Successful sync. */
        env1->segs[seg_reg].newselector = 0;
    }
    else
    {
        if (setjmp(env1->jmp_env) == 0)
        {
            if (seg_reg == R_CS)
            {
                uint32_t e1, e2;
                load_segment(&e1, &e2, selector);
                cpu_x86_load_seg_cache(env, R_CS, selector,
                                       get_seg_base(e1, e2),
                                       get_seg_limit(e1, e2),
                                       e2);
            }
            else
                load_seg(seg_reg, selector);
            env = savedenv;

            /* Successful sync. */
            env1->segs[seg_reg].newselector = 0;
        }
        else
        {
            env = savedenv;

            /* Postpone sync until the guest uses the selector. */
            env1->segs[seg_reg].selector = selector; /* hidden values are now incorrect, but will be resynced when this register is accessed. */
            env1->segs[seg_reg].newselector = selector;
            Log(("sync_seg: out of sync seg_reg=%d selector=%#x\n", seg_reg, selector));
        }
    }
}


/**
 * Correctly loads a new ldtr selector.
 *
 * @param env1 CPU environment.
 * @param selector Selector to load.
 */
void sync_ldtr(CPUX86State *env1, int selector)
{
    CPUX86State *saved_env = env;
    target_ulong saved_T0 = T0;
    if (setjmp(env1->jmp_env) == 0)
    {
        env = env1;
        T0 = selector;
        helper_lldt_T0();
        T0 = saved_T0;
        env = saved_env;
    }
    else
    {
        T0 = saved_T0;
        env = saved_env;
#ifdef VBOX_STRICT
        cpu_abort(env1, "sync_ldtr: selector=%#x\n", selector);
#endif
    }
}

/**
 * Correctly loads a new tr selector.
 *
 * @param env1 CPU environment.
 * @param selector Selector to load.
 */
int sync_tr(CPUX86State *env1, int selector)
{
    /* ARG! this was going to call helper_ltr_T0 but that won't work because of the busy flag. */
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;
    CPUX86State *saved_env = env;
    env = env1;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            goto l_failure;
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            goto l_failure;
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) /*||
            (type != 1 && type != 9)*/)
            goto l_failure;
        if (!(e2 & DESC_P_MASK))
            goto l_failure;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;

    env = saved_env;
    return 0;
l_failure:
    env = saved_env; /* restore the global env on the failure path too */
    AssertMsgFailed(("selector=%d\n", selector));
    return -1;
}
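/* Descriptor arithmetic above, worked through: in e2 the type field sits
 * at bits 8-11 (DESC_TYPE_SHIFT = 8).  An available 32-bit TSS has type
 * 9 (1001b); ORing in DESC_TSS_BUSY_MASK sets bit 9 of e2, turning the
 * type into 11 (1011b, busy TSS) before it is written back, which
 * mirrors what a hardware LTR does.
 */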

int emulate_single_instr(CPUX86State *env1)
{
#if 1 /* single stepping is broken when using a static tb... feel free to figure out why. :-) */
    /* This has to be static because it needs to be addressable
       using 32-bit immediate addresses on 64-bit machines. This
       is dictated by the gcc code model used when building this
       module / op.o. Using a static here pushes the problem
       onto the module loader. */
    static TranslationBlock tb_temp;
#endif
    TranslationBlock *tb;
    TranslationBlock *current;
    int csize;
    void (*gen_func)(void);
    uint8_t *tc_ptr;
    target_ulong old_eip;

    /* ensures env is loaded in ebp! */
    CPUX86State *savedenv = env;
    env = env1;

    RAWEx_ProfileStart(env, STATS_EMULATE_SINGLE_INSTR);

#if 1 /* see above */
    tc_ptr = env->pvCodeBuffer;
#else
    tc_ptr = code_gen_ptr;
#endif

    /*
     * Setup temporary translation block.
     */
    /* tb_alloc: */
#if 1 /* see above */
    tb = &tb_temp;
    tb->pc = env->segs[R_CS].base + env->eip;
    tb->cflags = 0;
#else
    tb = tb_alloc(env->segs[R_CS].base + env->eip);
    if (!tb)
    {
        tb_flush(env);
        tb = tb_alloc(env->segs[R_CS].base + env->eip);
    }
#endif

    /* tb_find_slow: */
    tb->tc_ptr = tc_ptr;
    tb->cs_base = env->segs[R_CS].base;
    tb->flags = env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));

    /* Initialize the rest with sensible values. */
    tb->size = 0;
    tb->phys_hash_next = NULL;
    tb->page_next[0] = NULL;
    tb->page_next[1] = NULL;
    tb->page_addr[0] = 0;
    tb->page_addr[1] = 0;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    tb->tb_next[0] = 0xffff;
    tb->tb_next[1] = 0xffff;
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
    tb->jmp_first = NULL;
    current = env->current_tb;
    env->current_tb = NULL;

    /*
     * Translate only one instruction.
     */
    ASMAtomicOrU32(&env->state, CPU_EMULATE_SINGLE_INSTR);
    if (cpu_gen_code(env, tb, env->cbCodeBuffer, &csize) < 0)
    {
        AssertFailed();
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#ifdef DEBUG
    if(csize > env->cbCodeBuffer)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
    if (tb->tc_ptr != tc_ptr)
    {
        RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);
        AssertFailed();
        ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);
        env = savedenv;
        return -1;
    }
#endif
    ASMAtomicAndU32(&env->state, ~CPU_EMULATE_SINGLE_INSTR);

    /* tb_link_phys: */
    tb->jmp_first = (TranslationBlock *)((intptr_t)tb | 2);
    Assert(tb->jmp_next[0] == NULL); Assert(tb->jmp_next[1] == NULL);
    if (tb->tb_next_offset[0] != 0xffff)
        tb_set_jmp_target(tb, 0, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[0]));
    if (tb->tb_next_offset[1] != 0xffff)
        tb_set_jmp_target(tb, 1, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[1]));

    /*
     * Execute it using emulation
     */
    old_eip = env->eip;
    gen_func = (void *)tb->tc_ptr;
    env->current_tb = tb;
    /* EIP stays the same while a repeated (e.g. REP-prefixed) instruction
       is still iterating, so keep re-executing the block until it
       advances; it is unclear why qemu does not simply jump back inside
       the generated code, and this may not be a very safe hack. */
    while(old_eip == env->eip)
    {
        gen_func();
        /*
         * Exit once we detect an external interrupt and interrupts are enabled
         */
        if( (env->interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT|CPU_INTERRUPT_EXTERNAL_TIMER)) ||
            ( (env->eflags & IF_MASK) &&
              !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
              (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_HARD) ) )
        {
            break;
        }
    }
    env->current_tb = current;

    Assert(tb->phys_hash_next == NULL);
    Assert(tb->page_next[0] == NULL);
    Assert(tb->page_next[1] == NULL);
    Assert(tb->page_addr[0] == 0);
    Assert(tb->page_addr[1] == 0);
/*
    Assert(tb->tb_next_offset[0] == 0xffff);
    Assert(tb->tb_next_offset[1] == 0xffff);
    Assert(tb->tb_next[0] == 0xffff);
    Assert(tb->tb_next[1] == 0xffff);
    Assert(tb->jmp_next[0] == NULL);
    Assert(tb->jmp_next[1] == NULL);
    Assert(tb->jmp_first == NULL); */

    RAWEx_ProfileStop(env, STATS_EMULATE_SINGLE_INSTR);

    /*
     * Execute the next instruction when we encounter instruction fusing.
     */
    if (env->hflags & HF_INHIBIT_IRQ_MASK)
    {
        Log(("REM: Emulating next instruction due to instruction fusing (HF_INHIBIT_IRQ_MASK) at %VGv\n", env->eip));
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        emulate_single_instr(env);
    }

    env = savedenv;
    return 0;
}

int get_ss_esp_from_tss_raw(CPUX86State *env1, uint32_t *ss_ptr,
                            uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

    CPUX86State *savedenv = env;
    env = env1;

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type %d", type);
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
    {
        env = savedenv;
        return 0;
    }
    //raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);

    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }

    env = savedenv;
    return 1;
}
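/* Worked example of the index arithmetic: for a 32-bit TSS (type 9 or
 * 11, so shift = 1) and dpl = 0, index = (0*4 + 2) << 1 = 4, i.e. ESP0
 * is read from tr.base + 4 and SS0 from tr.base + 8, matching the i386
 * TSS layout.  For a 16-bit TSS (shift = 0), SP0/SS0 sit at offsets 2
 * and 4 instead.
 */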

//*****************************************************************************
// Needs to be at the bottom of the file (overriding macros)

static inline CPU86_LDouble helper_fldt_raw(uint8_t *ptr)
{
    return *(CPU86_LDouble *)ptr;
}

static inline void helper_fstt_raw(CPU86_LDouble f, uint8_t *ptr)
{
    *(CPU86_LDouble *)ptr = f;
}

#undef stw
#undef stl
#undef stq
#define stw(a,b) *(uint16_t *)(a) = (uint16_t)(b)
#define stl(a,b) *(uint32_t *)(a) = (uint32_t)(b)
#define stq(a,b) *(uint64_t *)(a) = (uint64_t)(b)
#define data64 0
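/* The redefinitions above swap the guest-memory store macros for raw
 * host-pointer stores: the two raw FP state routines below operate on a
 * plain host buffer laid out in FXSAVE format, not on guest-virtual
 * addresses, so the softmmu accessors must not be used here.
 */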

//*****************************************************************************
void restore_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= (env->fptags[i] << i);
        }
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag ^ 0xff);

        addr = ptr + 0x20;
        for(i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, addr);
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it */
            stl(ptr + 0x18, env->mxcsr); /* mxcsr */
            stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for(i = 0; i < nb_xmm_regs; i++) {
#if __GNUC__ < 4
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
#else /* VBOX + __GNUC__ >= 4: gcc 4.x compiler bug - it runs out of registers for the 64-bit value. */
                stl(addr, env->xmm_regs[i].XMM_L(0));
                stl(addr + 4, env->xmm_regs[i].XMM_L(1));
                stl(addr + 8, env->xmm_regs[i].XMM_L(2));
                stl(addr + 12, env->xmm_regs[i].XMM_L(3));
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag;

        fp->FCW = env->fpuc;
        fp->FSW = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
        fptag = 0;
        for (i = 7; i >= 0; i--) {
            fptag <<= 2;
            if (env->fptags[i]) {
                fptag |= 3;
            } else {
                /* the FPU automatically computes it */
            }
        }
        fp->FTW = fptag;

        for(i = 0; i < 8; i++) {
            tmp = ST(i);
            helper_fstt_raw(tmp, &fp->regs[i].reg[0]);
        }
    }
}
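/* FXSAVE image offsets used above, for reference: 0x00 FCW, 0x02 FSW,
 * 0x04 abridged FTW (one valid bit per register, hence fptag ^ 0xff,
 * since env->fptags uses 1 = empty), 0x18 MXCSR, 0x1C MXCSR_MASK,
 * 0x20 ST0-ST7 in 16-byte slots, 0xA0 XMM0-XMM7 in 16-byte slots.
 */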

//*****************************************************************************
#undef lduw
#undef ldl
#undef ldq
#define lduw(a) *(uint16_t *)(a)
#define ldl(a) *(uint32_t *)(a)
#define ldq(a) *(uint64_t *)(a)
//*****************************************************************************
void save_raw_fp_state(CPUX86State *env, uint8_t *ptr)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    uint8_t *addr;

    if (env->cpuid_features & CPUID_FXSR)
    {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
        env->fpstt = (fpus >> 11) & 7;
        env->fpus = fpus & ~0x3800;
        fptag ^= 0xff;
        for(i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag >> i) & 1);
        }

        addr = ptr + 0x20;
        for(i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(addr);
            ST(i) = tmp;
            addr += 16;
        }

        if (env->cr[4] & CR4_OSFXSR_MASK) {
            /* XXX: finish it, endianness */
            env->mxcsr = ldl(ptr + 0x18);
            //ldl(ptr + 0x1c);
            nb_xmm_regs = 8 << data64;
            addr = ptr + 0xa0;
            for(i = 0; i < nb_xmm_regs; i++) {
#if HC_ARCH_BITS == 32
                /* this is a workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=35135 */
                env->xmm_regs[i].XMM_L(0) = ldl(addr);
                env->xmm_regs[i].XMM_L(1) = ldl(addr + 4);
                env->xmm_regs[i].XMM_L(2) = ldl(addr + 8);
                env->xmm_regs[i].XMM_L(3) = ldl(addr + 12);
#else
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
#endif
                addr += 16;
            }
        }
    }
    else
    {
        PX86FPUSTATE fp = (PX86FPUSTATE)ptr;
        int fptag, j;

        env->fpuc = fp->FCW;
        env->fpstt = (fp->FSW >> 11) & 7;
        env->fpus = fp->FSW & ~0x3800;
        fptag = fp->FTW;
        for(i = 0; i < 8; i++) {
            env->fptags[i] = ((fptag & 3) == 3);
            fptag >>= 2;
        }
        j = env->fpstt;
        for(i = 0; i < 8; i++) {
            tmp = helper_fldt_raw(&fp->regs[i].reg[0]);
            ST(i) = tmp;
        }
    }
}
//*****************************************************************************
//*****************************************************************************

#endif /* VBOX */