VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@20537

Last change on this file since 20537 was 19821, checked in by vboxsync, 16 years ago:

TM: TMTimerPoll cleanup.

  • Property svn:eol-style set to native
File size: 61.0 KB

/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL


void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
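
/* Both longjmp calls above land in the setjmp(env->jmp_env) at the top of
   the main loop in cpu_exec() below; a minimal sketch of that protocol
   (illustrative only, never compiled): */
#if 0
for (;;) {
    if (setjmp(env->jmp_env) == 0) {
        /* translate and run blocks; cpu_loop_exit() longjmps back here */
    } else {
        env_to_regs(); /* re-sync the emulator state after the longjmp */
    }
}
#endif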

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif

    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurs before
           the TB starts executing. */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
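
/* The low two bits of next_tb encode why the generated code returned:
   value 2 (tested above) means the instruction counter expired, and the
   remaining bits hold the pointer of the last executed TB. A hypothetical
   decoder (sketch; nothing in this file calls it): */
#if 0
static void decode_next_tb(unsigned long next_tb)
{
    TranslationBlock *last_tb = (TranslationBlock *)(long)(next_tb & ~3);
    int exit_reason = (int)(next_tb & 3); /* 2 -> icount expired */
    (void)last_tb;
    (void)exit_reason;
}
#endif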

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
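
/* Note on the two-level lookup: tb_find_fast() below first probes the
   per-CPU, direct-mapped tb_jmp_cache with the virtual PC and only falls
   back to this physically indexed hash chain on a miss. Keying the slow
   path on physical addresses keeps a block valid across guest page
   remappings and lets self-modifying code invalidate it correctly. */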

#ifndef VBOX
static inline TranslationBlock *tb_find_fast(void)
#else
DECLINLINE(TranslationBlock *) tb_find_fast(void)
#endif
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6   */
            | (env->sr & SR_S)            /* Bit  13  */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

#ifdef VBOX

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret = 0, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0)
        {
            env->current_tb = NULL;

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                Assert(!env->user_mode_only);
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else {
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                }
                env->exception_index = -1;
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;)
            {
                interrupt_request = env->interrupt_request;
#ifndef VBOX
                if (__builtin_expect(interrupt_request, 0))
#else
                if (RT_UNLIKELY(interrupt_request != 0))
#endif
                {
                    /** @todo: reconcile with what QEMU really does */

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HWACC)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
                    }

                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
                    {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_EXIT)
                    {
                        env->exception_index = EXCP_INTERRUPT;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & CPU_INTERRUPT_RC)
                    {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                }
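
                /* All forced exits above funnel through cpu_loop_exit() with
                   a distinct code in env->exception_index (EXCP_SINGLE_INSTR,
                   EXCP_INTERRUPT, EXCP_RC); the setjmp at the top of the
                   outer loop turns that code into cpu_exec()'s return value. */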

                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);

                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }

                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0
                    && !(tb->cflags & CF_RAW_MODE)
                    && tb->page_addr[1] == -1)
                {
                    tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);

                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
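
                /* Worked example for the decrementer refill above
                   (illustrative figures): with icount_extra == 70000 and
                   insns_left == 0 at expiry, the next slice is granted
                   min(70000, 0xffff) == 65535 instructions and icount_extra
                   drops to 4465; u16.low is what the generated code
                   decrements. */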

                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (    !(env->interrupt_request & ( CPU_INTERRUPT_EXIT | CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#else
#error unsupported target CPU
#endif
#include "hostregs_helper.h"
    return ret;
}
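
/* Minimal sketch of a caller, assuming a VBox-style wrapper (hypothetical
   name; the real entry point lives in the REM glue code, not in this file): */
#if 0
static int run_guest_sketch(CPUState *env)
{
    int rc = cpu_exec(env);
    /* rc is one of the EXCP_* codes stored in ret before cpu_loop_exit()
       above, e.g. EXCP_INTERRUPT (external exit request), EXCP_RC (VBox
       status code pending) or EXCP_SINGLE_INSTR (single step done). */
    return rc;
}
#endif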

#else /* !VBOX */
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC. */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
#endif /* !VBOX */

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave((target_ulong)ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor((target_ulong)ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
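
/* Usage sketch for cpu_x86_load_seg() (user-mode emulation only; the
   selector value is hypothetical): with CR0.PE clear or VM86 active, the
   cached segment base is simply selector << 4 and the limit is 0xffff: */
#if 0
static void load_seg_example(CPUX86State *s)
{
    cpu_x86_load_seg(s, R_DS, 0x0040); /* real-mode style: base 0x400 */
}
#endif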

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%RGv CR2=0x%RGv error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
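
/* Return-value protocol shared by every handle_cpu_signal() variant here:
   0 means the fault did not come from the emulated MMU (the caller should
   fall back to host handling); 1 means it was consumed, either by
   unprotecting the page, by fixing the soft TLB silently, or by raising a
   guest exception that never returns. */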

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = uc->uc_mcontext.gregs[REG_EIP];
    trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
#if defined(TARGET_I386) && defined(USE_CODE_COPY)
    if (trapno == 0x00 || trapno == 0x05) {
        /* send division by zero or bound exception */
        cpu_send_trap(pc, trapno, uc);
        return 1;
    } else
#endif
        return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                                 trapno == 0xe ?
                                 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                                 &uc->uc_sigmask, puc);
}
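
/* Note on the is_write computation above: for trap 0xe (x86 page fault)
   the hardware error code is in REG_ERR, and bit 1 of it is the W flag,
   so (err >> 1) & 1 extracts "the faulting access was a write". */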

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
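
/* Decoding note for the store check above: (insn >> 30) == 3 selects the
   SPARC load/store instruction format, and (insn >> 19) & 0x3f is the op3
   field; the listed values are the integer (st, stb, sth, std) and
   floating-point (stf, stdf, stfsr) store opcodes. */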

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */