VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@9379

Last change on this file since 9379 was 6546, checked in by vboxsync, 17 years ago

VBOX_WITH_NEW_PHYS_CODE changes, mostly related to REM. Killed a warning in cpu-exec.c.

  • Property svn:eol-style set to native
File size: 69.1 KB
 
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_M68K)
/* XXX: unify with i386 target */
void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
#endif
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}


static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      unsigned int flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

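    /* Note: translated blocks are looked up by guest *physical* PC (plus
       cs_base and flags), so a TB survives guest page table switches and
       two virtual aliases of the same physical code page share one
       translation. */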
    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
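    /* align-up idiom: advance code_gen_ptr to the next CODE_GEN_ALIGN
       boundary so the next generated block starts suitably aligned */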

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    unsigned int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . MMU enabled . MMU no-fault . Supervisor
    flags = (env->psref << 3) | ((env->mmuregs[0] & (MMU_E | MMU_NF)) << 1)
        | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
        (msr_se << MSR_SE) | (msr_le << MSR_LE);
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC;
#elif defined(TARGET_M68K)
    flags = env->fpcr & M68K_FPCR_PREC;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->sr & (SR_MD | SR_RB);
    cs_base = 0; /* XXXXX */
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
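    /* tb_jmp_cache is a small direct-mapped cache indexed by virtual PC;
       on a miss, or when cs_base/flags do not match, we fall back to the
       physical hash lookup in tb_find_slow() */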
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}


/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_SPARC)
    if (env1->halted) {
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->psret != 0)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set. */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)

    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
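    /* DF is cached as +1/-1, the step that string instructions add to
       their index registers, rather than as the raw EFLAGS.DF bit */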
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

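    /* Note: setjmp()/longjmp() serves as the exception mechanism here;
       cpu_loop_exit() and the exception helpers longjmp back to this
       point to unwind out of generated code. */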
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;
            VMMR3Unlock(env->pVM);
            VMMR3Lock(env->pVM);

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                Assert(!env->user_mode_only);
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %08x\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            T0 = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0))
                {
                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        T0 = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        T0 = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }

                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                tb = tb_find_fast();

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
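                /* TB chaining: T0 carries the address of the previously
                   executed TB with the jump slot index in its low 2 bits;
                   tb_add_jump() patches that block to branch directly to
                   this one, so hot paths skip the lookup above. */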
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
#if !defined(DEBUG_bird)
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        Log(("EMR0: %08X ESP=%08X IF=%d TF=%d CPL=%d\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
                    }
                }
                else
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        if(env->eflags & VM_MASK)
                        {
                            Log(("EMV86: %04X:%04X IF=%d TF=%d CPL=%d CR0=%08X\n", env->segs[R_CS].selector, env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0]));
                        }
                        else
                        {
                            Log(("EMR3: %08X ESP=%08X IF=%d TF=%d CPL=%d IOPL=%d CR0=%08X\n", env->eip, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), env->cr[0]));
                        }
                    }
                }
                else
                {
                    Log(("EMRM: %04X:%08X SS:ESP=%04X:%08X IF=%d TF=%d CPL=%d PE=%d PG=%d\n", env->segs[R_CS].selector, env->eip, env->segs[R_SS].selector, ESP, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, env->cr[0] & X86_CR0_PE, env->cr[0] & X86_CR0_PG));
                }
#endif /* !DEBUG_bird */
                if(env->state & CPU_EMULATE_SINGLE_STEP)
                {
#ifdef DEBUG_bird
                    static int s_cTimes = 0;
                    if (s_cTimes++ > 1000000)
                    {
                        RTLogPrintf("Enough stepping!\n");
#if 0
                        env->exception_index = EXCP_DEBUG;
                        ret = env->exception_index;
                        cpu_loop_exit();
#else
                        env->state &= ~CPU_EMULATE_SINGLE_STEP;
#endif
                    }
#endif
                    TMCpuTickPause(env->pVM);
                    remR3DisasInstr(env, -1, NULL);
                    TMCpuTickResume(env->pVM);
                    if(emulate_single_instr(env) == -1)
                    {
                        Log(("emulate_single_instr failed for EIP=%08X!!\n", env->eip));
                    }
                }
                else
                {
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                    gen_func();
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                }
#else /* !DEBUG || !VBOX || DEBUG_dmik */

                RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                gen_func();
                RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);

#endif /* !DEBUG || !VBOX || DEBUG_dmik */
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
            }
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do
           anything unnecessary (like crashing during emulate single instruction). */
        env->current_tb = NULL;
        TMTimerPoll(env1->pVM);
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}


#else /* !VBOX */


int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    int saved_i7;
    target_ulong tmp_T0;
#endif
    int ret, interrupt_request;
    void (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

#if defined(TARGET_I386)
    /* handle exit of HALTED state */
    if (env1->hflags & HF_HALTED_MASK) {
        /* disable halt condition */
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->eflags & IF_MASK)) {
            env1->hflags &= ~HF_HALTED_MASK;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_PPC)
    if (env1->halted) {
        if (env1->msr[MSR_EE] &&
            (env1->interrupt_request &
             (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER))) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_SPARC)
    if (env1->halted) {
        if ((env1->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env1->psret != 0)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_ARM)
    if (env1->halted) {
        /* An interrupt wakes the CPU even if the I and F CPSR bits are
           set. */
        if (env1->interrupt_request
            & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#elif defined(TARGET_MIPS)
    if (env1->halted) {
        if (env1->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER)) {
            env1->halted = 0;
        } else {
            return EXCP_HALTED;
        }
    }
#endif

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    /* we also save i7 because longjmp may not restore it */
    asm volatile ("mov %%i7, %0" : "=r" (saved_i7));
#endif

#if defined(TARGET_I386)
    env_to_regs();
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
#ifdef VBOX
            VMMR3Unlock(env->pVM);
            VMMR3Lock(env->pVM);

            /* Check for high priority requests first (like fatal
               errors). */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif /* VBOX */


            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
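            /* If kqemu returns without longjmp'ing, the guest code could
               not (or should not) be run natively, so we fall through to
               the dynamic translator below (or service an interrupt). */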

            T0 = 0; /* force lookup of first TB */
            for(;;) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                /* g1 can be modified by some libc? functions */
                tmp_T0 = T0;
#endif
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)) {
#ifdef VBOX
                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32(&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%08x!!\n", env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        env->exception_index = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
#endif /* VBOX */
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
#if defined(VBOX)
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_HARD);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#endif
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
#if defined(VBOX)
                        if (intno >= 0)
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (msr_ee != 0) {
                        if ((interrupt_request & CPU_INTERRUPT_HARD)) {
                            /* Raise it */
                            env->exception_index = EXCP_EXTERNAL;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        } else if ((interrupt_request & CPU_INTERRUPT_TIMER)) {
                            /* Raise it */
                            env->exception_index = EXCP_DECR;
                            env->error_code = 0;
                            do_interrupt(env);
                            env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        (env->CP0_Status & env->CP0_Cause & 0x0000FF00) &&
                        !(env->hflags & MIPS_HFLAG_EXL) &&
                        !(env->hflags & MIPS_HFLAG_ERL) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                            tmp_T0 = 0;
#else
                            T0 = 0;
#endif
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    } else if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_cpsr & CPSR_I)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                    }
#elif defined(TARGET_SH4)
                    /* XXXXX */
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#if defined(VBOX)
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#endif
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                        tmp_T0 = 0;
#else
                        T0 = 0;
#endif
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
#endif
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
#if defined(VBOX)
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
#else
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
#endif
                        cpu_loop_exit();
                    }
#if defined(VBOX)
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32(&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        cpu_loop_exit();
                    }
#endif
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
                    /* restore flags in standard format */
#ifdef reg_EAX
                    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_EBX
                    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ECX
                    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
                    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_ESI
                    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
                    env->regs[R_EDI] = EDI;
#endif
#ifdef reg_EBP
                    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESP
                    env->regs[R_ESP] = ESP;
#endif
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                        | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
#ifdef VBOX
                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
                T0 = tmp_T0;
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (T0 != 0 &&
#if USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
#ifdef VBOX
                        !(tb->cflags & CF_RAW_MODE) &&
#endif
                        tb->page_addr[1] == -1
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
                        && (tb->cflags & CF_CODE_COPY) ==
                        (((TranslationBlock *)(T0 & ~3))->cflags & CF_CODE_COPY)
#endif
                        ) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
#if defined(USE_CODE_COPY)
                        /* propagates the FP use info */
                        ((TranslationBlock *)(T0 & ~3))->cflags |=
                            (tb->cflags & CF_FP_USED);
#endif
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(TARGET_I386) && defined(USE_CODE_COPY)
{
    if (!(tb->cflags & CF_CODE_COPY)) {
        if ((tb->cflags & CF_FP_USED) && env->native_fp_regs) {
            save_native_fp_state(env);
        }
        gen_func();
    } else {
        if ((tb->cflags & CF_FP_USED) && !env->native_fp_regs) {
            restore_native_fp_state(env);
        }
        /* we work with native eflags */
        CC_SRC = cc_table[CC_OP].compute_all();
        CC_OP = CC_OP_EFLAGS;
        asm(".globl exec_loop\n"
            "\n"
            "debug1:\n"
            "    pushl %%ebp\n"
            "    fs movl %10, %9\n"
            "    fs movl %11, %%eax\n"
            "    andl $0x400, %%eax\n"
            "    fs orl %8, %%eax\n"
            "    pushl %%eax\n"
            "    popf\n"
            "    fs movl %%esp, %12\n"
            "    fs movl %0, %%eax\n"
            "    fs movl %1, %%ecx\n"
            "    fs movl %2, %%edx\n"
            "    fs movl %3, %%ebx\n"
            "    fs movl %4, %%esp\n"
            "    fs movl %5, %%ebp\n"
            "    fs movl %6, %%esi\n"
            "    fs movl %7, %%edi\n"
            "    fs jmp *%9\n"
            "exec_loop:\n"
            "    fs movl %%esp, %4\n"
            "    fs movl %12, %%esp\n"
            "    fs movl %%eax, %0\n"
            "    fs movl %%ecx, %1\n"
            "    fs movl %%edx, %2\n"
            "    fs movl %%ebx, %3\n"
            "    fs movl %%ebp, %5\n"
            "    fs movl %%esi, %6\n"
            "    fs movl %%edi, %7\n"
            "    pushf\n"
            "    popl %%eax\n"
            "    movl %%eax, %%ecx\n"
            "    andl $0x400, %%ecx\n"
            "    shrl $9, %%ecx\n"
            "    andl $0x8d5, %%eax\n"
            "    fs movl %%eax, %8\n"
            "    movl $1, %%eax\n"
            "    subl %%ecx, %%eax\n"
            "    fs movl %%eax, %11\n"
            "    fs movl %9, %%ebx\n" /* get T0 value */
            "    popl %%ebp\n"
            :
            : "m" (*(uint8_t *)offsetof(CPUState, regs[0])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[1])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[2])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[3])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[4])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[5])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[6])),
              "m" (*(uint8_t *)offsetof(CPUState, regs[7])),
              "m" (*(uint8_t *)offsetof(CPUState, cc_src)),
              "m" (*(uint8_t *)offsetof(CPUState, tmp0)),
              "a" (gen_func),
              "m" (*(uint8_t *)offsetof(CPUState, df)),
              "m" (*(uint8_t *)offsetof(CPUState, saved_esp))
            : "%ecx", "%edx"
            );
    }
}
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
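                /* Note: the IA-64 ABI calls through a function descriptor
                   (entry address + global pointer), so a fake descriptor is
                   built here for the generated code. */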
#else
#if defined(DEBUG) && defined(VBOX) && !defined(DEBUG_dmik)
#if !defined(DEBUG_bird)
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 0 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        Log(("EMR0: %08X IF=%d TF=%d CPL=%d\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3));
                    }
                }
                else
                if (((env->hflags >> HF_CPL_SHIFT) & 3) == 3 && (env->hflags & HF_PE_MASK) && (env->cr[0] & CR0_PG_MASK))
                {
                    if(!(env->state & CPU_EMULATE_SINGLE_STEP))
                    {
                        if(env->eflags & VM_MASK)
                        {
                            Log(("EMV86: %08X IF=%d TF=%d CPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, flags, env->cr[0]));
                        }
                        else
                        {
                            Log(("EMR3: %08X IF=%d TF=%d CPL=%d IOPL=%d flags=%08X CR0=%08X\n", env->eip, (env->eflags & IF_MASK) ? 1 : 0, (env->eflags & TF_MASK) ? 1 : 0, (env->hflags >> HF_CPL_SHIFT) & 3, ((env->eflags >> IOPL_SHIFT) & 3), flags, env->cr[0]));
                        }
                    }
                }
#endif /* !DEBUG_bird */
                if(env->state & CPU_EMULATE_SINGLE_STEP)
                {
#ifdef DEBUG_bird
                    static int s_cTimes = 0;
                    if (s_cTimes++ > 1000000) /* 1 million */
                    {
                        RTLogPrintf("Enough stepping!\n");
#if 0
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
#else
                        env->state &= ~CPU_EMULATE_SINGLE_STEP;
#endif
                    }
#endif
                    TMCpuTickPause(env->pVM);
                    remR3DisasInstr(env, -1, NULL);
                    TMCpuTickResume(env->pVM);
                    if(emulate_single_instr(env) == -1)
                    {
                        printf("emulate_single_instr failed for EIP=%08X!!\n", env->eip);
                    }
                }
                else
                {
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                    gen_func();
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                }
#else /* !DEBUG || !VBOX || DEBUG_dmik */

#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
                gen_func();
                RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#else /* !VBOX */
                gen_func();
#endif /* !VBOX */

#endif /* !DEBUG || !VBOX || DEBUG_dmik */
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            }
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
#if defined(USE_CODE_COPY)
    if (env->native_fp_regs) {
        save_native_fp_state(env);
    }
#endif
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
        | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
    asm volatile ("mov %0, %%i7" : : "r" (saved_i7));
#endif
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

#endif /* !VBOX */
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(USE_CODE_COPY)
static void cpu_send_trap(unsigned long pc, int trap,
                          struct ucontext *uc)
{
    TranslationBlock *tb;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, uc);
    }
    sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    raise_exception_err(trap, env->error_code);
}
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
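        /* For page faults (trapno 0xe), bit 1 of the x86 error code
           distinguishes writes from reads; other traps pass is_write=0. */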
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */