VirtualBox

source: vbox/trunk/src/recompiler_new/cpu-exec.c@13368

Last change on this file since 13368 was 13301, checked in by vboxsync, 16 years ago

more synchronization with QEMU - things get pretty hairy

  • Property svn:eol-style set to native
File size: 60.0 KB
 
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "disas.h"
33#include "tcg.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#include <sys/ucontext.h>
47#endif
48
49#if defined(__sparc__) && !defined(HOST_SOLARIS)
50// Work around ugly bugs in glibc that mangle global register contents
51#undef env
52#define env cpu_single_env
53#endif
54
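/* Set while new code is being generated if existing TBs may have been
   invalidated; cpu_exec() checks it after tb_find_fast() and resets
   next_tb so that no stale TB gets chained to. */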
55int tb_invalidated_flag;
56
57//#define DEBUG_EXEC
58//#define DEBUG_SIGNAL
59
60
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64 longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
112
113 if ((next_tb & 3) == 2) {
114 /* Restore PC. This may happen if an async event occurs before
115 the TB starts executing. */
116 CPU_PC_FROM_TB(env, tb);
117 }
118 tb_phys_invalidate(tb, -1);
119 tb_free(tb);
120}
121
122static TranslationBlock *tb_find_slow(target_ulong pc,
123 target_ulong cs_base,
124 uint64_t flags)
125{
126 TranslationBlock *tb, **ptb1;
127 unsigned int h;
128 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
129
130 tb_invalidated_flag = 0;
131
132 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
133
134 /* find translated block using physical mappings */
135 phys_pc = get_phys_addr_code(env, pc);
136 phys_page1 = phys_pc & TARGET_PAGE_MASK;
137 phys_page2 = -1;
138 h = tb_phys_hash_func(phys_pc);
139 ptb1 = &tb_phys_hash[h];
140 for(;;) {
141 tb = *ptb1;
142 if (!tb)
143 goto not_found;
144 if (tb->pc == pc &&
145 tb->page_addr[0] == phys_page1 &&
146 tb->cs_base == cs_base &&
147 tb->flags == flags) {
148 /* check next page if needed */
149 if (tb->page_addr[1] != -1) {
150 virt_page2 = (pc & TARGET_PAGE_MASK) +
151 TARGET_PAGE_SIZE;
152 phys_page2 = get_phys_addr_code(env, virt_page2);
153 if (tb->page_addr[1] == phys_page2)
154 goto found;
155 } else {
156 goto found;
157 }
158 }
159 ptb1 = &tb->phys_hash_next;
160 }
161 not_found:
162 /* if no translated code available, then translate it now */
163 tb = tb_gen_code(env, pc, cs_base, flags, 0);
164
165 found:
166 /* we add the TB in the virtual pc hash table */
167 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
168 return tb;
169}
170
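/* Fast TB lookup: probe the per-CPU virtual-PC hash cache (env->tb_jmp_cache)
   with the current PC/CS base/flags and fall back to tb_find_slow(), which
   searches by physical address and translates a new block if necessary. */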
171static inline TranslationBlock *tb_find_fast(void)
172{
173 TranslationBlock *tb;
174 target_ulong cs_base, pc;
175 uint64_t flags;
176
177 /* we record a subset of the CPU state. It will
178 always be the same before a given translated block
179 is executed. */
180#if defined(TARGET_I386)
181 flags = env->hflags;
182 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
183 cs_base = env->segs[R_CS].base;
184 pc = cs_base + env->eip;
185#elif defined(TARGET_ARM)
186 flags = env->thumb | (env->vfp.vec_len << 1)
187 | (env->vfp.vec_stride << 4);
188 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
189 flags |= (1 << 6);
190 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
191 flags |= (1 << 7);
192 flags |= (env->condexec_bits << 8);
193 cs_base = 0;
194 pc = env->regs[15];
195#elif defined(TARGET_SPARC)
196#ifdef TARGET_SPARC64
197 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
198 flags = ((env->pstate & PS_AM) << 2)
199 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
200 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
201#else
202 // FPU enable . Supervisor
203 flags = (env->psref << 4) | env->psrs;
204#endif
205 cs_base = env->npc;
206 pc = env->pc;
207#elif defined(TARGET_PPC)
208 flags = env->hflags;
209 cs_base = 0;
210 pc = env->nip;
211#elif defined(TARGET_MIPS)
212 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
213 cs_base = 0;
214 pc = env->active_tc.PC;
215#elif defined(TARGET_M68K)
216 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
217 | (env->sr & SR_S) /* Bit 13 */
218 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
219 cs_base = 0;
220 pc = env->pc;
221#elif defined(TARGET_SH4)
222 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
223 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
224 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
225 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
226 cs_base = 0;
227 pc = env->pc;
228#elif defined(TARGET_ALPHA)
229 flags = env->ps;
230 cs_base = 0;
231 pc = env->pc;
232#elif defined(TARGET_CRIS)
233 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
234 flags |= env->dslot;
235 cs_base = 0;
236 pc = env->pc;
237#else
238#error unsupported CPU
239#endif
240 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
241 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
242 tb->flags != flags)) {
243 tb = tb_find_slow(pc, cs_base, flags);
244 }
245 return tb;
246}
247
248/* main execution loop */
249
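/* Two cpu_exec() variants follow: the VirtualBox-specific loop under
   #ifdef VBOX and the original QEMU loop in the #else branch further down. */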
250#ifdef VBOX
251
252int cpu_exec(CPUState *env1)
253{
254#define DECLARE_HOST_REGS 1
255#include "hostregs_helper.h"
256 int ret, interrupt_request;
257 TranslationBlock *tb;
258 uint8_t *tc_ptr;
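    /* next_tb holds the result of tcg_qemu_tb_exec(): the previously executed
       TB with extra information encoded in its low two bits (the jump slot to
       patch, or 2 when the instruction counter expired); see the
       (next_tb & 3) tests below. */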
259 unsigned long next_tb;
260
261 if (cpu_halted(env1) == EXCP_HALTED)
262 return EXCP_HALTED;
263
264 cpu_single_env = env1;
265
266 /* first we save global registers */
267#define SAVE_HOST_REGS 1
268#include "hostregs_helper.h"
269 env = env1;
270
271 env_to_regs();
272#if defined(TARGET_I386)
273 /* put eflags in CPU temporary format */
274 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
275 DF = 1 - (2 * ((env->eflags >> 10) & 1));
276 CC_OP = CC_OP_EFLAGS;
277 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
278#elif defined(TARGET_SPARC)
279#elif defined(TARGET_M68K)
280 env->cc_op = CC_OP_FLAGS;
281 env->cc_dest = env->sr & 0xf;
282 env->cc_x = (env->sr >> 4) & 1;
283#elif defined(TARGET_ALPHA)
284#elif defined(TARGET_ARM)
285#elif defined(TARGET_PPC)
286#elif defined(TARGET_MIPS)
287#elif defined(TARGET_SH4)
288#elif defined(TARGET_CRIS)
289 /* XXXXX */
290#else
291#error unsupported target CPU
292#endif
293#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
294 env->exception_index = -1;
295#endif
296
297 /* prepare setjmp context for exception handling */
298 for(;;) {
299 if (setjmp(env->jmp_env) == 0)
300 {
301 env->current_tb = NULL;
302 VMMR3Unlock(env->pVM);
303 VMMR3Lock(env->pVM);
304
305 /*
306 * Check for fatal errors first
307 */
308 if (env->interrupt_request & CPU_INTERRUPT_RC) {
309 env->exception_index = EXCP_RC;
310 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
311 ret = env->exception_index;
312 cpu_loop_exit();
313 }
314
315 /* if an exception is pending, we execute it here */
316 if (env->exception_index >= 0) {
317 Assert(!env->user_mode_only);
318 if (env->exception_index >= EXCP_INTERRUPT) {
319 /* exit request from the cpu execution loop */
320 ret = env->exception_index;
321 break;
322 } else {
323 /* simulate a real cpu exception. On i386, it can
324 trigger new exceptions, but we do not handle
325 double or triple faults yet. */
326 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
327 Log(("do_interrupt %d %d %VGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
328 do_interrupt(env->exception_index,
329 env->exception_is_int,
330 env->error_code,
331 env->exception_next_eip, 0);
332 /* successfully delivered */
333 env->old_exception = -1;
334 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
335 }
336 env->exception_index = -1;
337 }
338
339 next_tb = 0; /* force lookup of first TB */
340 for(;;)
341 {
342 interrupt_request = env->interrupt_request;
343 if (__builtin_expect(interrupt_request, 0))
344 {
345 /** @todo: reconcile with what QEMU really does */
346
347 /* Single instruction exec request, we execute it and return (one way or the other).
348 The caller will always reschedule after doing this operation! */
349 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
350 {
351 /* not in flight are we? (if we are, we trapped) */
352 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
353 {
354 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
355 env->exception_index = EXCP_SINGLE_INSTR;
356 if (emulate_single_instr(env) == -1)
357 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%VGv!!\n", env->eip));
358
359 /* When we receive an external interrupt during execution of this single
360 instruction, then we should stay here. We will leave when we're ready
361 for raw-mode or when interrupted by pending EMT requests. */
362 interrupt_request = env->interrupt_request; /* reload this! */
363 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
364 || !(env->eflags & IF_MASK)
365 || (env->hflags & HF_INHIBIT_IRQ_MASK)
366 || (env->state & CPU_RAW_HWACC)
367 )
368 {
369 env->exception_index = ret = EXCP_SINGLE_INSTR;
370 cpu_loop_exit();
371 }
372 }
373 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
374 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
375 }
376
377 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
378 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
379 !(env->hflags & HF_SMM_MASK)) {
380 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
381 do_smm_enter();
382 next_tb = 0;
383 }
384 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
385 (env->eflags & IF_MASK) &&
386 !(env->hflags & HF_INHIBIT_IRQ_MASK))
387 {
388 /* if hardware interrupt pending, we execute it */
389 int intno;
390 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
391 intno = cpu_get_pic_interrupt(env);
392 if (intno >= 0)
393 {
394 Log(("do_interrupt %d\n", intno));
395 do_interrupt(intno, 0, 0, 0, 1);
396 }
397 /* ensure that no TB jump will be modified as
398 the program flow was changed */
399 next_tb = 0;
400 }
401 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
402 {
403 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
404 /* ensure that no TB jump will be modified as
405 the program flow was changed */
406 next_tb = 0;
407 }
408 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
409 if (interrupt_request & CPU_INTERRUPT_EXIT)
410 {
411 env->exception_index = EXCP_INTERRUPT;
412 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
413 ret = env->exception_index;
414 cpu_loop_exit();
415 }
416 if (interrupt_request & CPU_INTERRUPT_RC)
417 {
418 env->exception_index = EXCP_RC;
419 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
420 ret = env->exception_index;
421 cpu_loop_exit();
422 }
423 }
424
425 /*
426 * Check if the CPU state allows us to execute the code in raw-mode.
427 */
428 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
429 if (remR3CanExecuteRaw(env,
430 env->eip + env->segs[R_CS].base,
431 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
432 &env->exception_index))
433 {
434 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
435 ret = env->exception_index;
436 cpu_loop_exit();
437 }
438 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
439
440 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
441 spin_lock(&tb_lock);
442 tb = tb_find_fast();
443 /* Note: we do it here to avoid a gcc bug on Mac OS X when
444 doing it in tb_find_slow */
445 if (tb_invalidated_flag) {
446 /* as some TB could have been invalidated because
447 of memory exceptions while generating the code, we
448 must recompute the hash index here */
449 next_tb = 0;
450 tb_invalidated_flag = 0;
451 }
452
453 /* see if we can patch the calling TB. When the TB
454 spans two pages, we cannot safely do a direct
455 jump. */
456 if (next_tb != 0
457 && !(tb->cflags & CF_RAW_MODE)
458 && tb->page_addr[1] == -1)
459 {
460 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
461 }
462 spin_unlock(&tb_lock);
463 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
464
465 env->current_tb = tb;
466 while (env->current_tb) {
467 tc_ptr = tb->tc_ptr;
468 /* execute the generated code */
469 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
470 next_tb = tcg_qemu_tb_exec(tc_ptr);
471 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
472 env->current_tb = NULL;
473 if ((next_tb & 3) == 2) {
474 /* Instruction counter expired. */
475 int insns_left;
476 tb = (TranslationBlock *)(long)(next_tb & ~3);
477 /* Restore PC. */
478 CPU_PC_FROM_TB(env, tb);
479 insns_left = env->icount_decr.u32;
480 if (env->icount_extra && insns_left >= 0) {
481 /* Refill decrementer and continue execution. */
482 env->icount_extra += insns_left;
483 if (env->icount_extra > 0xffff) {
484 insns_left = 0xffff;
485 } else {
486 insns_left = env->icount_extra;
487 }
488 env->icount_extra -= insns_left;
489 env->icount_decr.u16.low = insns_left;
490 } else {
491 if (insns_left > 0) {
492 /* Execute remaining instructions. */
493 cpu_exec_nocache(insns_left, tb);
494 }
495 env->exception_index = EXCP_INTERRUPT;
496 next_tb = 0;
497 cpu_loop_exit();
498 }
499 }
500 }
501
502 /* reset soft MMU for next block (it can currently
503 only be set by a memory fault) */
504#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
505 if (env->hflags & HF_SOFTMMU_MASK) {
506 env->hflags &= ~HF_SOFTMMU_MASK;
507 /* do not allow linking to another block */
508 next_tb = 0;
509 }
510#endif
511 } /* for(;;) */
512 } else {
513 env_to_regs();
514 }
515#ifdef VBOX_HIGH_RES_TIMERS_HACK
516 /* NULL the current_tb here so cpu_interrupt() doesn't do
517 anything unnecessary (like crashing during emulate single instruction). */
518 env->current_tb = NULL;
519 TMTimerPoll(env1->pVM);
520#endif
521 } /* for(;;) */
522
523#if defined(TARGET_I386)
524 /* restore flags in standard format */
525 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
526#else
527#error unsupported target CPU
528#endif
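/* restore global registers */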
529#include "hostregs_helper.h"
530 return ret;
531}
532
533#else /* !VBOX */
534int cpu_exec(CPUState *env1)
535{
536#define DECLARE_HOST_REGS 1
537#include "hostregs_helper.h"
538 int ret, interrupt_request;
539 TranslationBlock *tb;
540 uint8_t *tc_ptr;
541 unsigned long next_tb;
542
543 if (cpu_halted(env1) == EXCP_HALTED)
544 return EXCP_HALTED;
545
546 cpu_single_env = env1;
547
548 /* first we save global registers */
549#define SAVE_HOST_REGS 1
550#include "hostregs_helper.h"
551 env = env1;
552
553 env_to_regs();
554#if defined(TARGET_I386)
555 /* put eflags in CPU temporary format */
556 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
557 DF = 1 - (2 * ((env->eflags >> 10) & 1));
558 CC_OP = CC_OP_EFLAGS;
559 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
560#elif defined(TARGET_SPARC)
561#elif defined(TARGET_M68K)
562 env->cc_op = CC_OP_FLAGS;
563 env->cc_dest = env->sr & 0xf;
564 env->cc_x = (env->sr >> 4) & 1;
565#elif defined(TARGET_ALPHA)
566#elif defined(TARGET_ARM)
567#elif defined(TARGET_PPC)
568#elif defined(TARGET_MIPS)
569#elif defined(TARGET_SH4)
570#elif defined(TARGET_CRIS)
571 /* XXXXX */
572#else
573#error unsupported target CPU
574#endif
575 env->exception_index = -1;
576
577 /* prepare setjmp context for exception handling */
578 for(;;) {
579 if (setjmp(env->jmp_env) == 0) {
580 env->current_tb = NULL;
581 /* if an exception is pending, we execute it here */
582 if (env->exception_index >= 0) {
583 if (env->exception_index >= EXCP_INTERRUPT) {
584 /* exit request from the cpu execution loop */
585 ret = env->exception_index;
586 break;
587 } else if (env->user_mode_only) {
588 /* if user mode only, we simulate a fake exception
589 which will be handled outside the cpu execution
590 loop */
591#if defined(TARGET_I386)
592 do_interrupt_user(env->exception_index,
593 env->exception_is_int,
594 env->error_code,
595 env->exception_next_eip);
596 /* successfully delivered */
597 env->old_exception = -1;
598#endif
599 ret = env->exception_index;
600 break;
601 } else {
602#if defined(TARGET_I386)
603 /* simulate a real cpu exception. On i386, it can
604 trigger new exceptions, but we do not handle
605 double or triple faults yet. */
606 do_interrupt(env->exception_index,
607 env->exception_is_int,
608 env->error_code,
609 env->exception_next_eip, 0);
610 /* successfully delivered */
611 env->old_exception = -1;
612#elif defined(TARGET_PPC)
613 do_interrupt(env);
614#elif defined(TARGET_MIPS)
615 do_interrupt(env);
616#elif defined(TARGET_SPARC)
617 do_interrupt(env);
618#elif defined(TARGET_ARM)
619 do_interrupt(env);
620#elif defined(TARGET_SH4)
621 do_interrupt(env);
622#elif defined(TARGET_ALPHA)
623 do_interrupt(env);
624#elif defined(TARGET_CRIS)
625 do_interrupt(env);
626#elif defined(TARGET_M68K)
627 do_interrupt(0);
628#endif
629 }
630 env->exception_index = -1;
631 }
632#ifdef USE_KQEMU
633 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
634 int ret;
635 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
636 ret = kqemu_cpu_exec(env);
637 /* put eflags in CPU temporary format */
638 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
639 DF = 1 - (2 * ((env->eflags >> 10) & 1));
640 CC_OP = CC_OP_EFLAGS;
641 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
642 if (ret == 1) {
643 /* exception */
644 longjmp(env->jmp_env, 1);
645 } else if (ret == 2) {
646 /* softmmu execution needed */
647 } else {
648 if (env->interrupt_request != 0) {
649 /* hardware interrupt will be executed just after */
650 } else {
651 /* otherwise, we restart */
652 longjmp(env->jmp_env, 1);
653 }
654 }
655 }
656#endif
657
658 next_tb = 0; /* force lookup of first TB */
659 for(;;) {
660 interrupt_request = env->interrupt_request;
661 if (unlikely(interrupt_request) &&
662 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
663 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
664 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
665 env->exception_index = EXCP_DEBUG;
666 cpu_loop_exit();
667 }
668#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
669 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
670 if (interrupt_request & CPU_INTERRUPT_HALT) {
671 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
672 env->halted = 1;
673 env->exception_index = EXCP_HLT;
674 cpu_loop_exit();
675 }
676#endif
677#if defined(TARGET_I386)
678 if (env->hflags2 & HF2_GIF_MASK) {
679 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
680 !(env->hflags & HF_SMM_MASK)) {
681 svm_check_intercept(SVM_EXIT_SMI);
682 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
683 do_smm_enter();
684 next_tb = 0;
685 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
686 !(env->hflags2 & HF2_NMI_MASK)) {
687 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
688 env->hflags2 |= HF2_NMI_MASK;
689 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
690 next_tb = 0;
691 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
692 (((env->hflags2 & HF2_VINTR_MASK) &&
693 (env->hflags2 & HF2_HIF_MASK)) ||
694 (!(env->hflags2 & HF2_VINTR_MASK) &&
695 (env->eflags & IF_MASK &&
696 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
697 int intno;
698 svm_check_intercept(SVM_EXIT_INTR);
699 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
700 intno = cpu_get_pic_interrupt(env);
701 if (loglevel & CPU_LOG_TB_IN_ASM) {
702 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
703 }
704 do_interrupt(intno, 0, 0, 0, 1);
705 /* ensure that no TB jump will be modified as
706 the program flow was changed */
707 next_tb = 0;
708#if !defined(CONFIG_USER_ONLY)
709 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
710 (env->eflags & IF_MASK) &&
711 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
712 int intno;
713 /* FIXME: this should respect TPR */
714 svm_check_intercept(SVM_EXIT_VINTR);
715 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
716 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
717 if (loglevel & CPU_LOG_TB_IN_ASM)
718 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
719 do_interrupt(intno, 0, 0, 0, 1);
720 next_tb = 0;
721#endif
722 }
723 }
724#elif defined(TARGET_PPC)
725#if 0
726 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
727 cpu_ppc_reset(env);
728 }
729#endif
730 if (interrupt_request & CPU_INTERRUPT_HARD) {
731 ppc_hw_interrupt(env);
732 if (env->pending_interrupts == 0)
733 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
734 next_tb = 0;
735 }
736#elif defined(TARGET_MIPS)
737 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
738 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
739 (env->CP0_Status & (1 << CP0St_IE)) &&
740 !(env->CP0_Status & (1 << CP0St_EXL)) &&
741 !(env->CP0_Status & (1 << CP0St_ERL)) &&
742 !(env->hflags & MIPS_HFLAG_DM)) {
743 /* Raise it */
744 env->exception_index = EXCP_EXT_INTERRUPT;
745 env->error_code = 0;
746 do_interrupt(env);
747 next_tb = 0;
748 }
749#elif defined(TARGET_SPARC)
750 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
751 (env->psret != 0)) {
752 int pil = env->interrupt_index & 15;
753 int type = env->interrupt_index & 0xf0;
754
755 if (((type == TT_EXTINT) &&
756 (pil == 15 || pil > env->psrpil)) ||
757 type != TT_EXTINT) {
758 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
759 env->exception_index = env->interrupt_index;
760 do_interrupt(env);
761 env->interrupt_index = 0;
762#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
763 cpu_check_irqs(env);
764#endif
765 next_tb = 0;
766 }
767 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
768 //do_interrupt(0, 0, 0, 0, 0);
769 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
770 }
771#elif defined(TARGET_ARM)
772 if (interrupt_request & CPU_INTERRUPT_FIQ
773 && !(env->uncached_cpsr & CPSR_F)) {
774 env->exception_index = EXCP_FIQ;
775 do_interrupt(env);
776 next_tb = 0;
777 }
778 /* ARMv7-M interrupt return works by loading a magic value
779 into the PC. On real hardware the load causes the
780 return to occur. The qemu implementation performs the
781 jump normally, then does the exception return when the
782 CPU tries to execute code at the magic address.
783 This will cause the magic PC value to be pushed to
784 the stack if an interrupt occurred at the wrong time.
785 We avoid this by disabling interrupts when
786 pc contains a magic address. */
787 if (interrupt_request & CPU_INTERRUPT_HARD
788 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
789 || !(env->uncached_cpsr & CPSR_I))) {
790 env->exception_index = EXCP_IRQ;
791 do_interrupt(env);
792 next_tb = 0;
793 }
794#elif defined(TARGET_SH4)
795 if (interrupt_request & CPU_INTERRUPT_HARD) {
796 do_interrupt(env);
797 next_tb = 0;
798 }
799#elif defined(TARGET_ALPHA)
800 if (interrupt_request & CPU_INTERRUPT_HARD) {
801 do_interrupt(env);
802 next_tb = 0;
803 }
804#elif defined(TARGET_CRIS)
805 if (interrupt_request & CPU_INTERRUPT_HARD
806 && (env->pregs[PR_CCS] & I_FLAG)) {
807 env->exception_index = EXCP_IRQ;
808 do_interrupt(env);
809 next_tb = 0;
810 }
811 if (interrupt_request & CPU_INTERRUPT_NMI
812 && (env->pregs[PR_CCS] & M_FLAG)) {
813 env->exception_index = EXCP_NMI;
814 do_interrupt(env);
815 next_tb = 0;
816 }
817#elif defined(TARGET_M68K)
818 if (interrupt_request & CPU_INTERRUPT_HARD
819 && ((env->sr & SR_I) >> SR_I_SHIFT)
820 < env->pending_level) {
821 /* Real hardware gets the interrupt vector via an
822 IACK cycle at this point. Current emulated
823 hardware doesn't rely on this, so we
824 provide/save the vector when the interrupt is
825 first signalled. */
826 env->exception_index = env->pending_vector;
827 do_interrupt(1);
828 next_tb = 0;
829 }
830#endif
831 /* Don't use the cached interrupt_request value,
832 do_interrupt may have updated the EXITTB flag. */
833 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
834 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
835 /* ensure that no TB jump will be modified as
836 the program flow was changed */
837 next_tb = 0;
838 }
839 if (interrupt_request & CPU_INTERRUPT_EXIT) {
840 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
841 env->exception_index = EXCP_INTERRUPT;
842 cpu_loop_exit();
843 }
844 }
845#ifdef DEBUG_EXEC
846 if ((loglevel & CPU_LOG_TB_CPU)) {
847 /* restore flags in standard format */
848 regs_to_env();
849#if defined(TARGET_I386)
850 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
851 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
852 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
853#elif defined(TARGET_ARM)
854 cpu_dump_state(env, logfile, fprintf, 0);
855#elif defined(TARGET_SPARC)
856 cpu_dump_state(env, logfile, fprintf, 0);
857#elif defined(TARGET_PPC)
858 cpu_dump_state(env, logfile, fprintf, 0);
859#elif defined(TARGET_M68K)
860 cpu_m68k_flush_flags(env, env->cc_op);
861 env->cc_op = CC_OP_FLAGS;
862 env->sr = (env->sr & 0xffe0)
863 | env->cc_dest | (env->cc_x << 4);
864 cpu_dump_state(env, logfile, fprintf, 0);
865#elif defined(TARGET_MIPS)
866 cpu_dump_state(env, logfile, fprintf, 0);
867#elif defined(TARGET_SH4)
868 cpu_dump_state(env, logfile, fprintf, 0);
869#elif defined(TARGET_ALPHA)
870 cpu_dump_state(env, logfile, fprintf, 0);
871#elif defined(TARGET_CRIS)
872 cpu_dump_state(env, logfile, fprintf, 0);
873#else
874#error unsupported target CPU
875#endif
876 }
877#endif
878 spin_lock(&tb_lock);
879 tb = tb_find_fast();
880 /* Note: we do it here to avoid a gcc bug on Mac OS X when
881 doing it in tb_find_slow */
882 if (tb_invalidated_flag) {
883 /* as some TB could have been invalidated because
884 of memory exceptions while generating the code, we
885 must recompute the hash index here */
886 next_tb = 0;
887 tb_invalidated_flag = 0;
888 }
889#ifdef DEBUG_EXEC
890 if ((loglevel & CPU_LOG_EXEC)) {
891 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
892 (long)tb->tc_ptr, tb->pc,
893 lookup_symbol(tb->pc));
894 }
895#endif
896 /* see if we can patch the calling TB. When the TB
897 spans two pages, we cannot safely do a direct
898 jump. */
899 {
900 if (next_tb != 0 &&
901#ifdef USE_KQEMU
902 (env->kqemu_enabled != 2) &&
903#endif
904 tb->page_addr[1] == -1) {
905 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
906 }
907 }
908 spin_unlock(&tb_lock);
909 env->current_tb = tb;
910 while (env->current_tb) {
911 tc_ptr = tb->tc_ptr;
912 /* execute the generated code */
913#if defined(__sparc__) && !defined(HOST_SOLARIS)
914#undef env
915 env = cpu_single_env;
916#define env cpu_single_env
917#endif
918 next_tb = tcg_qemu_tb_exec(tc_ptr);
919 env->current_tb = NULL;
920 if ((next_tb & 3) == 2) {
921 /* Instruction counter expired. */
922 int insns_left;
923 tb = (TranslationBlock *)(long)(next_tb & ~3);
924 /* Restore PC. */
925 CPU_PC_FROM_TB(env, tb);
926 insns_left = env->icount_decr.u32;
927 if (env->icount_extra && insns_left >= 0) {
928 /* Refill decrementer and continue execution. */
929 env->icount_extra += insns_left;
930 if (env->icount_extra > 0xffff) {
931 insns_left = 0xffff;
932 } else {
933 insns_left = env->icount_extra;
934 }
935 env->icount_extra -= insns_left;
936 env->icount_decr.u16.low = insns_left;
937 } else {
938 if (insns_left > 0) {
939 /* Execute remaining instructions. */
940 cpu_exec_nocache(insns_left, tb);
941 }
942 env->exception_index = EXCP_INTERRUPT;
943 next_tb = 0;
944 cpu_loop_exit();
945 }
946 }
947 }
948 /* reset soft MMU for next block (it can currently
949 only be set by a memory fault) */
950#if defined(USE_KQEMU)
951#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
952 if (kqemu_is_ok(env) &&
953 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
954 cpu_loop_exit();
955 }
956#endif
957 } /* for(;;) */
958 } else {
959 env_to_regs();
960 }
961 } /* for(;;) */
962
963
964#if defined(TARGET_I386)
965 /* restore flags in standard format */
966 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
967#elif defined(TARGET_ARM)
968 /* XXX: Save/restore host fpu exception state? */
969#elif defined(TARGET_SPARC)
970#elif defined(TARGET_PPC)
971#elif defined(TARGET_M68K)
972 cpu_m68k_flush_flags(env, env->cc_op);
973 env->cc_op = CC_OP_FLAGS;
974 env->sr = (env->sr & 0xffe0)
975 | env->cc_dest | (env->cc_x << 4);
976#elif defined(TARGET_MIPS)
977#elif defined(TARGET_SH4)
978#elif defined(TARGET_ALPHA)
979#elif defined(TARGET_CRIS)
980 /* XXXXX */
981#else
982#error unsupported target CPU
983#endif
984
985 /* restore global registers */
986#include "hostregs_helper.h"
987
988 /* fail safe : never use cpu_single_env outside cpu_exec() */
989 cpu_single_env = NULL;
990 return ret;
991}
992#endif /* !VBOX */
993
994/* must only be called from the generated code as an exception can be
995 generated */
996void tb_invalidate_page_range(target_ulong start, target_ulong end)
997{
998 /* XXX: cannot enable it yet because it yields to MMU exception
999 where NIP != read address on PowerPC */
1000#if 0
1001 target_ulong phys_addr;
1002 phys_addr = get_phys_addr_code(env, start);
1003 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1004#endif
1005}
1006
1007#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1008
1009void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1010{
1011 CPUX86State *saved_env;
1012
1013 saved_env = env;
1014 env = s;
1015 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1016 selector &= 0xffff;
1017 cpu_x86_load_seg_cache(env, seg_reg, selector,
1018 (selector << 4), 0xffff, 0);
1019 } else {
1020 load_seg(seg_reg, selector);
1021 }
1022 env = saved_env;
1023}
1024
1025void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1026{
1027 CPUX86State *saved_env;
1028
1029 saved_env = env;
1030 env = s;
1031
1032 helper_fsave((target_ulong)ptr, data32);
1033
1034 env = saved_env;
1035}
1036
1037void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1038{
1039 CPUX86State *saved_env;
1040
1041 saved_env = env;
1042 env = s;
1043
1044 helper_frstor((target_ulong)ptr, data32);
1045
1046 env = saved_env;
1047}
1048
1049#endif /* TARGET_I386 */
1050
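/* Without the software MMU, guest memory accesses are performed directly and
   faults arrive as host signals; the handlers below convert them back into
   guest MMU faults or CPU exceptions. */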
1051#if !defined(CONFIG_SOFTMMU)
1052
1053#if defined(TARGET_I386)
1054
1055/* 'pc' is the host PC at which the exception was raised. 'address' is
1056 the effective address of the memory exception. 'is_write' is 1 if a
1057 write caused the exception and otherwise 0. 'old_set' is the
1058 signal set which should be restored */
1059static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1060 int is_write, sigset_t *old_set,
1061 void *puc)
1062{
1063 TranslationBlock *tb;
1064 int ret;
1065
1066 if (cpu_single_env)
1067 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1068#if defined(DEBUG_SIGNAL)
1069 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1070 pc, address, is_write, *(unsigned long *)old_set);
1071#endif
1072 /* XXX: locking issue */
1073 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1074 return 1;
1075 }
1076
1077 /* see if it is an MMU fault */
1078 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1079 ((env->hflags & HF_CPL_MASK) == 3), 0);
1080 if (ret < 0)
1081 return 0; /* not an MMU fault */
1082 if (ret == 0)
1083 return 1; /* the MMU fault was handled without causing real CPU fault */
1084 /* now we have a real cpu fault */
1085 tb = tb_find_pc(pc);
1086 if (tb) {
1087 /* the PC is inside the translated code. It means that we have
1088 a virtual CPU fault */
1089 cpu_restore_state(tb, env, pc, puc);
1090 }
1091 if (ret == 1) {
1092#if 0
1093 printf("PF exception: EIP=0x%VGv CR2=0x%VGv error=0x%x\n",
1094 env->eip, env->cr[2], env->error_code);
1095#endif
1096 /* we restore the process signal mask as the sigreturn should
1097 do it (XXX: use sigsetjmp) */
1098 sigprocmask(SIG_SETMASK, old_set, NULL);
1099 raise_exception_err(env->exception_index, env->error_code);
1100 } else {
1101 /* activate soft MMU for this block */
1102 env->hflags |= HF_SOFTMMU_MASK;
1103 cpu_resume_from_signal(env, puc);
1104 }
1105 /* never comes here */
1106 return 1;
1107}
1108
1109#elif defined(TARGET_ARM)
1110static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1111 int is_write, sigset_t *old_set,
1112 void *puc)
1113{
1114 TranslationBlock *tb;
1115 int ret;
1116
1117 if (cpu_single_env)
1118 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1119#if defined(DEBUG_SIGNAL)
1120 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1121 pc, address, is_write, *(unsigned long *)old_set);
1122#endif
1123 /* XXX: locking issue */
1124 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1125 return 1;
1126 }
1127 /* see if it is an MMU fault */
1128 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1129 if (ret < 0)
1130 return 0; /* not an MMU fault */
1131 if (ret == 0)
1132 return 1; /* the MMU fault was handled without causing real CPU fault */
1133 /* now we have a real cpu fault */
1134 tb = tb_find_pc(pc);
1135 if (tb) {
1136 /* the PC is inside the translated code. It means that we have
1137 a virtual CPU fault */
1138 cpu_restore_state(tb, env, pc, puc);
1139 }
1140 /* we restore the process signal mask as the sigreturn should
1141 do it (XXX: use sigsetjmp) */
1142 sigprocmask(SIG_SETMASK, old_set, NULL);
1143 cpu_loop_exit();
1144}
1145#elif defined(TARGET_SPARC)
1146static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1147 int is_write, sigset_t *old_set,
1148 void *puc)
1149{
1150 TranslationBlock *tb;
1151 int ret;
1152
1153 if (cpu_single_env)
1154 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1155#if defined(DEBUG_SIGNAL)
1156 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1157 pc, address, is_write, *(unsigned long *)old_set);
1158#endif
1159 /* XXX: locking issue */
1160 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1161 return 1;
1162 }
1163 /* see if it is an MMU fault */
1164 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1165 if (ret < 0)
1166 return 0; /* not an MMU fault */
1167 if (ret == 0)
1168 return 1; /* the MMU fault was handled without causing real CPU fault */
1169 /* now we have a real cpu fault */
1170 tb = tb_find_pc(pc);
1171 if (tb) {
1172 /* the PC is inside the translated code. It means that we have
1173 a virtual CPU fault */
1174 cpu_restore_state(tb, env, pc, puc);
1175 }
1176 /* we restore the process signal mask as the sigreturn should
1177 do it (XXX: use sigsetjmp) */
1178 sigprocmask(SIG_SETMASK, old_set, NULL);
1179 cpu_loop_exit();
1180}
1181#elif defined (TARGET_PPC)
1182static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1183 int is_write, sigset_t *old_set,
1184 void *puc)
1185{
1186 TranslationBlock *tb;
1187 int ret;
1188
1189 if (cpu_single_env)
1190 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1191#if defined(DEBUG_SIGNAL)
1192 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1193 pc, address, is_write, *(unsigned long *)old_set);
1194#endif
1195 /* XXX: locking issue */
1196 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1197 return 1;
1198 }
1199
1200 /* see if it is an MMU fault */
1201 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1202 if (ret < 0)
1203 return 0; /* not an MMU fault */
1204 if (ret == 0)
1205 return 1; /* the MMU fault was handled without causing real CPU fault */
1206
1207 /* now we have a real cpu fault */
1208 tb = tb_find_pc(pc);
1209 if (tb) {
1210 /* the PC is inside the translated code. It means that we have
1211 a virtual CPU fault */
1212 cpu_restore_state(tb, env, pc, puc);
1213 }
1214 if (ret == 1) {
1215#if 0
1216 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1217 env->nip, env->error_code, tb);
1218#endif
1219 /* we restore the process signal mask as the sigreturn should
1220 do it (XXX: use sigsetjmp) */
1221 sigprocmask(SIG_SETMASK, old_set, NULL);
1222 do_raise_exception_err(env->exception_index, env->error_code);
1223 } else {
1224 /* activate soft MMU for this block */
1225 cpu_resume_from_signal(env, puc);
1226 }
1227 /* never comes here */
1228 return 1;
1229}
1230
1231#elif defined(TARGET_M68K)
1232static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1233 int is_write, sigset_t *old_set,
1234 void *puc)
1235{
1236 TranslationBlock *tb;
1237 int ret;
1238
1239 if (cpu_single_env)
1240 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1241#if defined(DEBUG_SIGNAL)
1242 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1243 pc, address, is_write, *(unsigned long *)old_set);
1244#endif
1245 /* XXX: locking issue */
1246 if (is_write && page_unprotect(address, pc, puc)) {
1247 return 1;
1248 }
1249 /* see if it is an MMU fault */
1250 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1251 if (ret < 0)
1252 return 0; /* not an MMU fault */
1253 if (ret == 0)
1254 return 1; /* the MMU fault was handled without causing real CPU fault */
1255 /* now we have a real cpu fault */
1256 tb = tb_find_pc(pc);
1257 if (tb) {
1258 /* the PC is inside the translated code. It means that we have
1259 a virtual CPU fault */
1260 cpu_restore_state(tb, env, pc, puc);
1261 }
1262 /* we restore the process signal mask as the sigreturn should
1263 do it (XXX: use sigsetjmp) */
1264 sigprocmask(SIG_SETMASK, old_set, NULL);
1265 cpu_loop_exit();
1266 /* never comes here */
1267 return 1;
1268}
1269
1270#elif defined (TARGET_MIPS)
1271static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1272 int is_write, sigset_t *old_set,
1273 void *puc)
1274{
1275 TranslationBlock *tb;
1276 int ret;
1277
1278 if (cpu_single_env)
1279 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1280#if defined(DEBUG_SIGNAL)
1281 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1282 pc, address, is_write, *(unsigned long *)old_set);
1283#endif
1284 /* XXX: locking issue */
1285 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1286 return 1;
1287 }
1288
1289 /* see if it is an MMU fault */
1290 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1291 if (ret < 0)
1292 return 0; /* not an MMU fault */
1293 if (ret == 0)
1294 return 1; /* the MMU fault was handled without causing real CPU fault */
1295
1296 /* now we have a real cpu fault */
1297 tb = tb_find_pc(pc);
1298 if (tb) {
1299 /* the PC is inside the translated code. It means that we have
1300 a virtual CPU fault */
1301 cpu_restore_state(tb, env, pc, puc);
1302 }
1303 if (ret == 1) {
1304#if 0
1305 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1306 env->nip, env->error_code, tb);
1307#endif
1308 /* we restore the process signal mask as the sigreturn should
1309 do it (XXX: use sigsetjmp) */
1310 sigprocmask(SIG_SETMASK, old_set, NULL);
1311 do_raise_exception_err(env->exception_index, env->error_code);
1312 } else {
1313 /* activate soft MMU for this block */
1314 cpu_resume_from_signal(env, puc);
1315 }
1316 /* never comes here */
1317 return 1;
1318}
1319
1320#elif defined (TARGET_SH4)
1321static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1322 int is_write, sigset_t *old_set,
1323 void *puc)
1324{
1325 TranslationBlock *tb;
1326 int ret;
1327
1328 if (cpu_single_env)
1329 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1330#if defined(DEBUG_SIGNAL)
1331 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1332 pc, address, is_write, *(unsigned long *)old_set);
1333#endif
1334 /* XXX: locking issue */
1335 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1336 return 1;
1337 }
1338
1339 /* see if it is an MMU fault */
1340 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1341 if (ret < 0)
1342 return 0; /* not an MMU fault */
1343 if (ret == 0)
1344 return 1; /* the MMU fault was handled without causing real CPU fault */
1345
1346 /* now we have a real cpu fault */
1347 tb = tb_find_pc(pc);
1348 if (tb) {
1349 /* the PC is inside the translated code. It means that we have
1350 a virtual CPU fault */
1351 cpu_restore_state(tb, env, pc, puc);
1352 }
1353#if 0
1354 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1355 env->nip, env->error_code, tb);
1356#endif
1357 /* we restore the process signal mask as the sigreturn should
1358 do it (XXX: use sigsetjmp) */
1359 sigprocmask(SIG_SETMASK, old_set, NULL);
1360 cpu_loop_exit();
1361 /* never comes here */
1362 return 1;
1363}
1364#else
1365#error unsupported target CPU
1366#endif
1367
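/* Host-specific cpu_signal_handler() implementations: each extracts the
   faulting PC and (where possible) the write flag from the host signal
   context and forwards them to handle_cpu_signal() above. */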
1368#if defined(__i386__)
1369
1370#if defined(__APPLE__)
1371# include <sys/ucontext.h>
1372
1373# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1374# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1375# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1376#else
1377# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1378# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1379# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1380#endif
1381
1382int cpu_signal_handler(int host_signum, void *pinfo,
1383 void *puc)
1384{
1385 siginfo_t *info = pinfo;
1386 struct ucontext *uc = puc;
1387 unsigned long pc;
1388 int trapno;
1389
1390#ifndef REG_EIP
1391/* for glibc 2.1 */
1392#define REG_EIP EIP
1393#define REG_ERR ERR
1394#define REG_TRAPNO TRAPNO
1395#endif
1396 pc = uc->uc_mcontext.gregs[REG_EIP];
1397 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1398#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1399 if (trapno == 0x00 || trapno == 0x05) {
1400 /* send division by zero or bound exception */
1401 cpu_send_trap(pc, trapno, uc);
1402 return 1;
1403 } else
1404#endif
1405 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1406 trapno == 0xe ?
1407 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1408 &uc->uc_sigmask, puc);
1409}
1410
1411#elif defined(__x86_64__)
1412
1413int cpu_signal_handler(int host_signum, void *pinfo,
1414 void *puc)
1415{
1416 siginfo_t *info = pinfo;
1417 struct ucontext *uc = puc;
1418 unsigned long pc;
1419
1420 pc = uc->uc_mcontext.gregs[REG_RIP];
1421 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1422 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1423 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1424 &uc->uc_sigmask, puc);
1425}
1426
1427#elif defined(__powerpc__)
1428
1429/***********************************************************************
1430 * signal context platform-specific definitions
1431 * From Wine
1432 */
1433#ifdef linux
1434/* All Registers access - only for local access */
1435# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1436/* Gpr Registers access */
1437# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1438# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1439# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1440# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1441# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1442# define LR_sig(context) REG_sig(link, context) /* Link register */
1443# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1444/* Float Registers access */
1445# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1446# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1447/* Exception Registers access */
1448# define DAR_sig(context) REG_sig(dar, context)
1449# define DSISR_sig(context) REG_sig(dsisr, context)
1450# define TRAP_sig(context) REG_sig(trap, context)
1451#endif /* linux */
1452
1453#ifdef __APPLE__
1454# include <sys/ucontext.h>
1455typedef struct ucontext SIGCONTEXT;
1456/* All Registers access - only for local access */
1457# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1458# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1459# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1460# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1461/* Gpr Registers access */
1462# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1463# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1464# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1465# define CTR_sig(context) REG_sig(ctr, context)
1466 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1467 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1468# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1469/* Float Registers access */
1470# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1471# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1472/* Exception Registers access */
1473# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1474# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1475# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1476#endif /* __APPLE__ */
1477
1478int cpu_signal_handler(int host_signum, void *pinfo,
1479 void *puc)
1480{
1481 siginfo_t *info = pinfo;
1482 struct ucontext *uc = puc;
1483 unsigned long pc;
1484 int is_write;
1485
1486 pc = IAR_sig(uc);
1487 is_write = 0;
1488#if 0
1489 /* ppc 4xx case */
1490 if (DSISR_sig(uc) & 0x00800000)
1491 is_write = 1;
1492#else
1493 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1494 is_write = 1;
1495#endif
1496 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1497 is_write, &uc->uc_sigmask, puc);
1498}
1499
1500#elif defined(__alpha__)
1501
1502int cpu_signal_handler(int host_signum, void *pinfo,
1503 void *puc)
1504{
1505 siginfo_t *info = pinfo;
1506 struct ucontext *uc = puc;
1507 uint32_t *pc = uc->uc_mcontext.sc_pc;
1508 uint32_t insn = *pc;
1509 int is_write = 0;
1510
1511 /* XXX: need kernel patch to get write flag faster */
1512 switch (insn >> 26) {
1513 case 0x0d: // stw
1514 case 0x0e: // stb
1515 case 0x0f: // stq_u
1516 case 0x24: // stf
1517 case 0x25: // stg
1518 case 0x26: // sts
1519 case 0x27: // stt
1520 case 0x2c: // stl
1521 case 0x2d: // stq
1522 case 0x2e: // stl_c
1523 case 0x2f: // stq_c
1524 is_write = 1;
1525 }
1526
1527 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1528 is_write, &uc->uc_sigmask, puc);
1529}
1530#elif defined(__sparc__)
1531
1532int cpu_signal_handler(int host_signum, void *pinfo,
1533 void *puc)
1534{
1535 siginfo_t *info = pinfo;
1536 uint32_t *regs = (uint32_t *)(info + 1);
1537 void *sigmask = (regs + 20);
1538 unsigned long pc;
1539 int is_write;
1540 uint32_t insn;
1541
1542 /* XXX: is there a standard glibc define ? */
1543 pc = regs[1];
1544 /* XXX: need kernel patch to get write flag faster */
1545 is_write = 0;
1546 insn = *(uint32_t *)pc;
1547 if ((insn >> 30) == 3) {
1548 switch((insn >> 19) & 0x3f) {
1549 case 0x05: // stb
1550 case 0x06: // sth
1551 case 0x04: // st
1552 case 0x07: // std
1553 case 0x24: // stf
1554 case 0x27: // stdf
1555 case 0x25: // stfsr
1556 is_write = 1;
1557 break;
1558 }
1559 }
1560 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1561 is_write, sigmask, NULL);
1562}
1563
1564#elif defined(__arm__)
1565
1566int cpu_signal_handler(int host_signum, void *pinfo,
1567 void *puc)
1568{
1569 siginfo_t *info = pinfo;
1570 struct ucontext *uc = puc;
1571 unsigned long pc;
1572 int is_write;
1573
1574 pc = uc->uc_mcontext.gregs[R15];
1575 /* XXX: compute is_write */
1576 is_write = 0;
1577 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1578 is_write,
1579 &uc->uc_sigmask, puc);
1580}
1581
1582#elif defined(__mc68000)
1583
1584int cpu_signal_handler(int host_signum, void *pinfo,
1585 void *puc)
1586{
1587 siginfo_t *info = pinfo;
1588 struct ucontext *uc = puc;
1589 unsigned long pc;
1590 int is_write;
1591
1592 pc = uc->uc_mcontext.gregs[16];
1593 /* XXX: compute is_write */
1594 is_write = 0;
1595 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1596 is_write,
1597 &uc->uc_sigmask, puc);
1598}
1599
1600#elif defined(__ia64)
1601
1602#ifndef __ISR_VALID
1603 /* This ought to be in <bits/siginfo.h>... */
1604# define __ISR_VALID 1
1605#endif
1606
1607int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1608{
1609 siginfo_t *info = pinfo;
1610 struct ucontext *uc = puc;
1611 unsigned long ip;
1612 int is_write = 0;
1613
1614 ip = uc->uc_mcontext.sc_ip;
1615 switch (host_signum) {
1616 case SIGILL:
1617 case SIGFPE:
1618 case SIGSEGV:
1619 case SIGBUS:
1620 case SIGTRAP:
1621 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1622 /* ISR.W (write-access) is bit 33: */
1623 is_write = (info->si_isr >> 33) & 1;
1624 break;
1625
1626 default:
1627 break;
1628 }
1629 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1630 is_write,
1631 &uc->uc_sigmask, puc);
1632}
1633
1634#elif defined(__s390__)
1635
1636int cpu_signal_handler(int host_signum, void *pinfo,
1637 void *puc)
1638{
1639 siginfo_t *info = pinfo;
1640 struct ucontext *uc = puc;
1641 unsigned long pc;
1642 int is_write;
1643
1644 pc = uc->uc_mcontext.psw.addr;
1645 /* XXX: compute is_write */
1646 is_write = 0;
1647 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1648 is_write,
1649 &uc->uc_sigmask, puc);
1650}
1651
1652#else
1653
1654#error host CPU specific signal handler needed
1655
1656#endif
1657
1658#endif /* !defined(CONFIG_SOFTMMU) */