VirtualBox

source: vbox/trunk/src/recompiler_new/cpu-exec.c @ 13607

Last change on this file since 13607 was 13569, checked in by vboxsync, 16 years ago

New recompiler successfully boots 32-bit and 64-bit guests on 64-bit Linux.

  • Property svn:eol-style set to native
File size: 60.2 KB
 
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "disas.h"
33#include "tcg.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#include <sys/ucontext.h>
47#endif
48
49#if defined(__sparc__) && !defined(HOST_SOLARIS)
50// Work around ugly bugs in glibc that mangle global register contents
51#undef env
52#define env cpu_single_env
53#endif
54
55int tb_invalidated_flag;
56
57//#define DEBUG_EXEC
58//#define DEBUG_SIGNAL
59
60
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64 longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
112
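/* The two low bits of next_tb encode how the TB exited: the index of the
   outgoing jump to patch, or 2 when the instruction counter expired inside
   the block. */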
113 if ((next_tb & 3) == 2) {
114 /* Restore PC. This may happen if an async event occurs before
115 the TB starts executing. */
116 CPU_PC_FROM_TB(env, tb);
117 }
118 tb_phys_invalidate(tb, -1);
119 tb_free(tb);
120}
121
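/* Slow-path TB lookup: hash the physical PC and walk the tb_phys_hash
   chain, matching pc, cs_base, flags and, for blocks spanning two pages,
   the second physical page; a miss translates a fresh block. */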
122static TranslationBlock *tb_find_slow(target_ulong pc,
123 target_ulong cs_base,
124 uint64_t flags)
125{
126 TranslationBlock *tb, **ptb1;
127 unsigned int h;
128 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
129
130 tb_invalidated_flag = 0;
131
132 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
133
134 /* find translated block using physical mappings */
135 phys_pc = get_phys_addr_code(env, pc);
136 phys_page1 = phys_pc & TARGET_PAGE_MASK;
137 phys_page2 = -1;
138 h = tb_phys_hash_func(phys_pc);
139 ptb1 = &tb_phys_hash[h];
140 for(;;) {
141 tb = *ptb1;
142 if (!tb)
143 goto not_found;
144 if (tb->pc == pc &&
145 tb->page_addr[0] == phys_page1 &&
146 tb->cs_base == cs_base &&
147 tb->flags == flags) {
148 /* check next page if needed */
149 if (tb->page_addr[1] != -1) {
150 virt_page2 = (pc & TARGET_PAGE_MASK) +
151 TARGET_PAGE_SIZE;
152 phys_page2 = get_phys_addr_code(env, virt_page2);
153 if (tb->page_addr[1] == phys_page2)
154 goto found;
155 } else {
156 goto found;
157 }
158 }
159 ptb1 = &tb->phys_hash_next;
160 }
161 not_found:
162 /* if no translated code available, then translate it now */
163 tb = tb_gen_code(env, pc, cs_base, flags, 0);
164
165 found:
166 /* we add the TB in the virtual pc hash table */
167 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
168 return tb;
169}
170
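/* Fast-path TB lookup: build the (pc, cs_base, flags) key from the current
   CPU state and probe the per-CPU tb_jmp_cache indexed by virtual PC,
   falling back to tb_find_slow() on a miss. */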
171#ifndef VBOX
172static inline TranslationBlock *tb_find_fast(void)
173#else
174DECLINLINE(TranslationBlock *) tb_find_fast(void)
175#endif
176{
177 TranslationBlock *tb;
178 target_ulong cs_base, pc;
179 uint64_t flags;
180
181 /* we record a subset of the CPU state. It will
182 always be the same before a given translated block
183 is executed. */
184#if defined(TARGET_I386)
185 flags = env->hflags;
186 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
187 cs_base = env->segs[R_CS].base;
188 pc = cs_base + env->eip;
189#elif defined(TARGET_ARM)
190 flags = env->thumb | (env->vfp.vec_len << 1)
191 | (env->vfp.vec_stride << 4);
192 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
193 flags |= (1 << 6);
194 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
195 flags |= (1 << 7);
196 flags |= (env->condexec_bits << 8);
197 cs_base = 0;
198 pc = env->regs[15];
199#elif defined(TARGET_SPARC)
200#ifdef TARGET_SPARC64
201 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
202 flags = ((env->pstate & PS_AM) << 2)
203 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
204 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
205#else
206 // FPU enable . Supervisor
207 flags = (env->psref << 4) | env->psrs;
208#endif
209 cs_base = env->npc;
210 pc = env->pc;
211#elif defined(TARGET_PPC)
212 flags = env->hflags;
213 cs_base = 0;
214 pc = env->nip;
215#elif defined(TARGET_MIPS)
216 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
217 cs_base = 0;
218 pc = env->active_tc.PC;
219#elif defined(TARGET_M68K)
220 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
221 | (env->sr & SR_S) /* Bit 13 */
222 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
223 cs_base = 0;
224 pc = env->pc;
225#elif defined(TARGET_SH4)
226 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
227 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
228 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
229 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
230 cs_base = 0;
231 pc = env->pc;
232#elif defined(TARGET_ALPHA)
233 flags = env->ps;
234 cs_base = 0;
235 pc = env->pc;
236#elif defined(TARGET_CRIS)
237 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
238 flags |= env->dslot;
239 cs_base = 0;
240 pc = env->pc;
241#else
242#error unsupported CPU
243#endif
244 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
245 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
246 tb->flags != flags)) {
247 tb = tb_find_slow(pc, cs_base, flags);
248 }
249 return tb;
250}
251
252/* main execution loop */
253
254#ifdef VBOX
255
256int cpu_exec(CPUState *env1)
257{
258#define DECLARE_HOST_REGS 1
259#include "hostregs_helper.h"
260 int ret, interrupt_request;
261 TranslationBlock *tb;
262 uint8_t *tc_ptr;
263 unsigned long next_tb;
264
265#ifndef VBOX
266 if (cpu_halted(env1) == EXCP_HALTED)
267 return EXCP_HALTED;
268#endif
269
270 cpu_single_env = env1;
271
272 /* first we save global registers */
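/* hostregs_helper.h expands differently per inclusion: with DECLARE_HOST_REGS
   it declared storage above, with SAVE_HOST_REGS it saves the host registers
   holding globals (such as env) here, and the bare include at the end of the
   function restores them. */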
273#define SAVE_HOST_REGS 1
274#include "hostregs_helper.h"
275 env = env1;
276
277 env_to_regs();
278#if defined(TARGET_I386)
279 /* put eflags in CPU temporary format */
280 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
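/* EFLAGS.DF (bit 10) is kept as +1/-1 in DF so string instructions can add
   it directly to their index registers. */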
281 DF = 1 - (2 * ((env->eflags >> 10) & 1));
282 CC_OP = CC_OP_EFLAGS;
283 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
284#elif defined(TARGET_SPARC)
285#elif defined(TARGET_M68K)
286 env->cc_op = CC_OP_FLAGS;
287 env->cc_dest = env->sr & 0xf;
288 env->cc_x = (env->sr >> 4) & 1;
289#elif defined(TARGET_ALPHA)
290#elif defined(TARGET_ARM)
291#elif defined(TARGET_PPC)
292#elif defined(TARGET_MIPS)
293#elif defined(TARGET_SH4)
294#elif defined(TARGET_CRIS)
295 /* XXXXX */
296#else
297#error unsupported target CPU
298#endif
299#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
300 env->exception_index = -1;
301#endif
302
303 /* prepare setjmp context for exception handling */
304 for(;;) {
305 if (setjmp(env->jmp_env) == 0)
306 {
307 env->current_tb = NULL;
308 VMMR3Unlock(env->pVM);
309 VMMR3Lock(env->pVM);
310
311 /*
312 * Check for fatal errors first
313 */
314 if (env->interrupt_request & CPU_INTERRUPT_RC) {
315 env->exception_index = EXCP_RC;
316 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
317 ret = env->exception_index;
318 cpu_loop_exit();
319 }
320
321 /* if an exception is pending, we execute it here */
322 if (env->exception_index >= 0) {
323 Assert(!env->user_mode_only);
324 if (env->exception_index >= EXCP_INTERRUPT) {
325 /* exit request from the cpu execution loop */
326 ret = env->exception_index;
327 break;
328 } else {
329 /* simulate a real cpu exception. On i386, it can
330 trigger new exceptions, but we do not handle
331 double or triple faults yet. */
332 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
333 Log(("do_interrupt %d %d %VGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
334 do_interrupt(env->exception_index,
335 env->exception_is_int,
336 env->error_code,
337 env->exception_next_eip, 0);
338 /* successfully delivered */
339 env->old_exception = -1;
340 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
341 }
342 env->exception_index = -1;
343 }
344
345 next_tb = 0; /* force lookup of first TB */
346 for(;;)
347 {
348 interrupt_request = env->interrupt_request;
349#ifndef VBOX
350 if (__builtin_expect(interrupt_request, 0))
351#else
352 if (RT_UNLIKELY(interrupt_request != 0))
353#endif
354 {
355 /** @todo: reconcile with what QEMU really does */
356
357 /* Single instruction exec request: we execute it and return (one way or the other).
358 The caller will always reschedule after doing this operation! */
359 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
360 {
361 /* not in flight are we? (if we are, we trapped) */
362 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
363 {
364 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
365 env->exception_index = EXCP_SINGLE_INSTR;
366 if (emulate_single_instr(env) == -1)
367 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%VGv!!\n", env->eip));
368
369 /* When we receive an external interrupt during execution of this single
370 instruction, we should stay here. We will leave when we're ready
371 for raw-mode or when interrupted by pending EMT requests. */
372 interrupt_request = env->interrupt_request; /* reload this! */
373 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
374 || !(env->eflags & IF_MASK)
375 || (env->hflags & HF_INHIBIT_IRQ_MASK)
376 || (env->state & CPU_RAW_HWACC)
377 )
378 {
379 env->exception_index = ret = EXCP_SINGLE_INSTR;
380 cpu_loop_exit();
381 }
382 }
383 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
384 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
385 }
386
387 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
388 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
389 !(env->hflags & HF_SMM_MASK)) {
390 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
391 do_smm_enter();
392 next_tb = 0;
393 }
394 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
395 (env->eflags & IF_MASK) &&
396 !(env->hflags & HF_INHIBIT_IRQ_MASK))
397 {
398 /* if hardware interrupt pending, we execute it */
399 int intno;
400 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
401 intno = cpu_get_pic_interrupt(env);
402 if (intno >= 0)
403 {
404 Log(("do_interrupt %d\n", intno));
405 do_interrupt(intno, 0, 0, 0, 1);
406 }
407 /* ensure that no TB jump will be modified as
408 the program flow was changed */
409 next_tb = 0;
410 }
411 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
412 {
413 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
414 /* ensure that no TB jump will be modified as
415 the program flow was changed */
416 next_tb = 0;
417 }
418 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
419 if (interrupt_request & CPU_INTERRUPT_EXIT)
420 {
421 env->exception_index = EXCP_INTERRUPT;
422 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
423 ret = env->exception_index;
424 cpu_loop_exit();
425 }
426 if (interrupt_request & CPU_INTERRUPT_RC)
427 {
428 env->exception_index = EXCP_RC;
429 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
430 ret = env->exception_index;
431 cpu_loop_exit();
432 }
433 }
434
435 /*
436 * Check if the CPU state allows us to execute the code in raw-mode.
437 */
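/* If the state is suitable for raw-mode execution, remR3CanExecuteRaw sets
   exception_index accordingly and we leave the recompiler loop so the caller
   can switch execution modes. */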
438 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
439 if (remR3CanExecuteRaw(env,
440 env->eip + env->segs[R_CS].base,
441 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
442 &env->exception_index))
443 {
444 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
445 ret = env->exception_index;
446 cpu_loop_exit();
447 }
448 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
449
450 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
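/* tb_lock serializes access to the TB hash tables and the direct-jump
   patching below. */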
451 spin_lock(&tb_lock);
452 tb = tb_find_fast();
453 /* Note: we do it here to avoid a gcc bug on Mac OS X when
454 doing it in tb_find_slow */
455 if (tb_invalidated_flag) {
456 /* as some TB could have been invalidated because
457 of memory exceptions while generating the code, we
458 must recompute the hash index here */
459 next_tb = 0;
460 tb_invalidated_flag = 0;
461 }
462
463 /* see if we can patch the calling TB. When the TB
464 spans two pages, we cannot safely do a direct
465 jump. */
466 if (next_tb != 0
467 && !(tb->cflags & CF_RAW_MODE)
468 && tb->page_addr[1] == -1)
469 {
470 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
471 }
472 spin_unlock(&tb_lock);
473 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
474
475 env->current_tb = tb;
476 while (env->current_tb) {
477 tc_ptr = tb->tc_ptr;
478 /* execute the generated code */
479 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
480 next_tb = tcg_qemu_tb_exec(tc_ptr);
481 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
482 env->current_tb = NULL;
483 if ((next_tb & 3) == 2) {
484 /* Instruction counter expired. */
485 int insns_left;
486 tb = (TranslationBlock *)(long)(next_tb & ~3);
487 /* Restore PC. */
488 CPU_PC_FROM_TB(env, tb);
489 insns_left = env->icount_decr.u32;
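/* icount_decr counts down while the block runs; leftover budget is kept in
   icount_extra and moved back into the decrementer in chunks of at most
   0xffff. */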
490 if (env->icount_extra && insns_left >= 0) {
491 /* Refill decrementer and continue execution. */
492 env->icount_extra += insns_left;
493 if (env->icount_extra > 0xffff) {
494 insns_left = 0xffff;
495 } else {
496 insns_left = env->icount_extra;
497 }
498 env->icount_extra -= insns_left;
499 env->icount_decr.u16.low = insns_left;
500 } else {
501 if (insns_left > 0) {
502 /* Execute remaining instructions. */
503 cpu_exec_nocache(insns_left, tb);
504 }
505 env->exception_index = EXCP_INTERRUPT;
506 next_tb = 0;
507 cpu_loop_exit();
508 }
509 }
510 }
511
512 /* reset soft MMU for next block (it can currently
513 only be set by a memory fault) */
514#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
515 if (env->hflags & HF_SOFTMMU_MASK) {
516 env->hflags &= ~HF_SOFTMMU_MASK;
517 /* do not allow linking to another block */
518 next_tb = 0;
519 }
520#endif
521 } /* for(;;) */
522 } else {
523 env_to_regs();
524 }
525#ifdef VBOX_HIGH_RES_TIMERS_HACK
526 /* NULL the current_tb here so cpu_interrupt() doesn't do
527 anything unnecessary (like crashing during single-instruction emulation). */
528 env->current_tb = NULL;
529 TMTimerPoll(env1->pVM);
530#endif
531 } /* for(;;) */
532
533#if defined(TARGET_I386)
534 /* restore flags in standard format */
535 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
536#else
537#error unsupported target CPU
538#endif
539#include "hostregs_helper.h"
540 return ret;
541}
542
543#else /* !VBOX */
544int cpu_exec(CPUState *env1)
545{
546#define DECLARE_HOST_REGS 1
547#include "hostregs_helper.h"
548 int ret, interrupt_request;
549 TranslationBlock *tb;
550 uint8_t *tc_ptr;
551 unsigned long next_tb;
552
553 if (cpu_halted(env1) == EXCP_HALTED)
554 return EXCP_HALTED;
555
556 cpu_single_env = env1;
557
558 /* first we save global registers */
559#define SAVE_HOST_REGS 1
560#include "hostregs_helper.h"
561 env = env1;
562
563 env_to_regs();
564#if defined(TARGET_I386)
565 /* put eflags in CPU temporary format */
566 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
567 DF = 1 - (2 * ((env->eflags >> 10) & 1));
568 CC_OP = CC_OP_EFLAGS;
569 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
570#elif defined(TARGET_SPARC)
571#elif defined(TARGET_M68K)
572 env->cc_op = CC_OP_FLAGS;
573 env->cc_dest = env->sr & 0xf;
574 env->cc_x = (env->sr >> 4) & 1;
575#elif defined(TARGET_ALPHA)
576#elif defined(TARGET_ARM)
577#elif defined(TARGET_PPC)
578#elif defined(TARGET_MIPS)
579#elif defined(TARGET_SH4)
580#elif defined(TARGET_CRIS)
581 /* XXXXX */
582#else
583#error unsupported target CPU
584#endif
585 env->exception_index = -1;
586
587 /* prepare setjmp context for exception handling */
588 for(;;) {
589 if (setjmp(env->jmp_env) == 0) {
590 env->current_tb = NULL;
591 /* if an exception is pending, we execute it here */
592 if (env->exception_index >= 0) {
593 if (env->exception_index >= EXCP_INTERRUPT) {
594 /* exit request from the cpu execution loop */
595 ret = env->exception_index;
596 break;
597 } else if (env->user_mode_only) {
598 /* if user mode only, we simulate a fake exception
599 which will be handled outside the cpu execution
600 loop */
601#if defined(TARGET_I386)
602 do_interrupt_user(env->exception_index,
603 env->exception_is_int,
604 env->error_code,
605 env->exception_next_eip);
606 /* successfully delivered */
607 env->old_exception = -1;
608#endif
609 ret = env->exception_index;
610 break;
611 } else {
612#if defined(TARGET_I386)
613 /* simulate a real cpu exception. On i386, it can
614 trigger new exceptions, but we do not handle
615 double or triple faults yet. */
616 do_interrupt(env->exception_index,
617 env->exception_is_int,
618 env->error_code,
619 env->exception_next_eip, 0);
620 /* successfully delivered */
621 env->old_exception = -1;
622#elif defined(TARGET_PPC)
623 do_interrupt(env);
624#elif defined(TARGET_MIPS)
625 do_interrupt(env);
626#elif defined(TARGET_SPARC)
627 do_interrupt(env);
628#elif defined(TARGET_ARM)
629 do_interrupt(env);
630#elif defined(TARGET_SH4)
631 do_interrupt(env);
632#elif defined(TARGET_ALPHA)
633 do_interrupt(env);
634#elif defined(TARGET_CRIS)
635 do_interrupt(env);
636#elif defined(TARGET_M68K)
637 do_interrupt(0);
638#endif
639 }
640 env->exception_index = -1;
641 }
642#ifdef USE_KQEMU
643 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
644 int ret;
645 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
646 ret = kqemu_cpu_exec(env);
647 /* put eflags in CPU temporary format */
648 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
649 DF = 1 - (2 * ((env->eflags >> 10) & 1));
650 CC_OP = CC_OP_EFLAGS;
651 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
652 if (ret == 1) {
653 /* exception */
654 longjmp(env->jmp_env, 1);
655 } else if (ret == 2) {
656 /* softmmu execution needed */
657 } else {
658 if (env->interrupt_request != 0) {
659 /* hardware interrupt will be executed just after */
660 } else {
661 /* otherwise, we restart */
662 longjmp(env->jmp_env, 1);
663 }
664 }
665 }
666#endif
667
668 next_tb = 0; /* force lookup of first TB */
669 for(;;) {
670 interrupt_request = env->interrupt_request;
671 if (unlikely(interrupt_request) &&
672 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
673 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
674 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
675 env->exception_index = EXCP_DEBUG;
676 cpu_loop_exit();
677 }
678#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
679 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
680 if (interrupt_request & CPU_INTERRUPT_HALT) {
681 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
682 env->halted = 1;
683 env->exception_index = EXCP_HLT;
684 cpu_loop_exit();
685 }
686#endif
687#if defined(TARGET_I386)
688 if (env->hflags2 & HF2_GIF_MASK) {
689 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
690 !(env->hflags & HF_SMM_MASK)) {
691 svm_check_intercept(SVM_EXIT_SMI);
692 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
693 do_smm_enter();
694 next_tb = 0;
695 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
696 !(env->hflags2 & HF2_NMI_MASK)) {
697 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
698 env->hflags2 |= HF2_NMI_MASK;
699 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
700 next_tb = 0;
701 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
702 (((env->hflags2 & HF2_VINTR_MASK) &&
703 (env->hflags2 & HF2_HIF_MASK)) ||
704 (!(env->hflags2 & HF2_VINTR_MASK) &&
705 (env->eflags & IF_MASK &&
706 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
707 int intno;
708 svm_check_intercept(SVM_EXIT_INTR);
709 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
710 intno = cpu_get_pic_interrupt(env);
711 if (loglevel & CPU_LOG_TB_IN_ASM) {
712 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
713 }
714 do_interrupt(intno, 0, 0, 0, 1);
715 /* ensure that no TB jump will be modified as
716 the program flow was changed */
717 next_tb = 0;
718#if !defined(CONFIG_USER_ONLY)
719 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
720 (env->eflags & IF_MASK) &&
721 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
722 int intno;
723 /* FIXME: this should respect TPR */
724 svm_check_intercept(SVM_EXIT_VINTR);
725 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
726 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
727 if (loglevel & CPU_LOG_TB_IN_ASM)
728 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
729 do_interrupt(intno, 0, 0, 0, 1);
730 next_tb = 0;
731#endif
732 }
733 }
734#elif defined(TARGET_PPC)
735#if 0
736 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
737 cpu_ppc_reset(env);
738 }
739#endif
740 if (interrupt_request & CPU_INTERRUPT_HARD) {
741 ppc_hw_interrupt(env);
742 if (env->pending_interrupts == 0)
743 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
744 next_tb = 0;
745 }
746#elif defined(TARGET_MIPS)
747 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
748 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
749 (env->CP0_Status & (1 << CP0St_IE)) &&
750 !(env->CP0_Status & (1 << CP0St_EXL)) &&
751 !(env->CP0_Status & (1 << CP0St_ERL)) &&
752 !(env->hflags & MIPS_HFLAG_DM)) {
753 /* Raise it */
754 env->exception_index = EXCP_EXT_INTERRUPT;
755 env->error_code = 0;
756 do_interrupt(env);
757 next_tb = 0;
758 }
759#elif defined(TARGET_SPARC)
760 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
761 (env->psret != 0)) {
762 int pil = env->interrupt_index & 15;
763 int type = env->interrupt_index & 0xf0;
764
765 if (((type == TT_EXTINT) &&
766 (pil == 15 || pil > env->psrpil)) ||
767 type != TT_EXTINT) {
768 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
769 env->exception_index = env->interrupt_index;
770 do_interrupt(env);
771 env->interrupt_index = 0;
772#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
773 cpu_check_irqs(env);
774#endif
775 next_tb = 0;
776 }
777 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
778 //do_interrupt(0, 0, 0, 0, 0);
779 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
780 }
781#elif defined(TARGET_ARM)
782 if (interrupt_request & CPU_INTERRUPT_FIQ
783 && !(env->uncached_cpsr & CPSR_F)) {
784 env->exception_index = EXCP_FIQ;
785 do_interrupt(env);
786 next_tb = 0;
787 }
788 /* ARMv7-M interrupt return works by loading a magic value
789 into the PC. On real hardware the load causes the
790 return to occur. The qemu implementation performs the
791 jump normally, then does the exception return when the
792 CPU tries to execute code at the magic address.
793 This will cause the magic PC value to be pushed to
794 the stack if an interrupt occurred at the wrong time.
795 We avoid this by disabling interrupts when
796 pc contains a magic address. */
797 if (interrupt_request & CPU_INTERRUPT_HARD
798 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
799 || !(env->uncached_cpsr & CPSR_I))) {
800 env->exception_index = EXCP_IRQ;
801 do_interrupt(env);
802 next_tb = 0;
803 }
804#elif defined(TARGET_SH4)
805 if (interrupt_request & CPU_INTERRUPT_HARD) {
806 do_interrupt(env);
807 next_tb = 0;
808 }
809#elif defined(TARGET_ALPHA)
810 if (interrupt_request & CPU_INTERRUPT_HARD) {
811 do_interrupt(env);
812 next_tb = 0;
813 }
814#elif defined(TARGET_CRIS)
815 if (interrupt_request & CPU_INTERRUPT_HARD
816 && (env->pregs[PR_CCS] & I_FLAG)) {
817 env->exception_index = EXCP_IRQ;
818 do_interrupt(env);
819 next_tb = 0;
820 }
821 if (interrupt_request & CPU_INTERRUPT_NMI
822 && (env->pregs[PR_CCS] & M_FLAG)) {
823 env->exception_index = EXCP_NMI;
824 do_interrupt(env);
825 next_tb = 0;
826 }
827#elif defined(TARGET_M68K)
828 if (interrupt_request & CPU_INTERRUPT_HARD
829 && ((env->sr & SR_I) >> SR_I_SHIFT)
830 < env->pending_level) {
831 /* Real hardware gets the interrupt vector via an
832 IACK cycle at this point. Current emulated
833 hardware doesn't rely on this, so we
834 provide/save the vector when the interrupt is
835 first signalled. */
836 env->exception_index = env->pending_vector;
837 do_interrupt(1);
838 next_tb = 0;
839 }
840#endif
841 /* Don't use the cached interrupt_request value,
842 do_interrupt may have updated the EXITTB flag. */
843 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
844 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
845 /* ensure that no TB jump will be modified as
846 the program flow was changed */
847 next_tb = 0;
848 }
849 if (interrupt_request & CPU_INTERRUPT_EXIT) {
850 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
851 env->exception_index = EXCP_INTERRUPT;
852 cpu_loop_exit();
853 }
854 }
855#ifdef DEBUG_EXEC
856 if ((loglevel & CPU_LOG_TB_CPU)) {
857 /* restore flags in standard format */
858 regs_to_env();
859#if defined(TARGET_I386)
860 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
861 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
862 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
863#elif defined(TARGET_ARM)
864 cpu_dump_state(env, logfile, fprintf, 0);
865#elif defined(TARGET_SPARC)
866 cpu_dump_state(env, logfile, fprintf, 0);
867#elif defined(TARGET_PPC)
868 cpu_dump_state(env, logfile, fprintf, 0);
869#elif defined(TARGET_M68K)
870 cpu_m68k_flush_flags(env, env->cc_op);
871 env->cc_op = CC_OP_FLAGS;
872 env->sr = (env->sr & 0xffe0)
873 | env->cc_dest | (env->cc_x << 4);
874 cpu_dump_state(env, logfile, fprintf, 0);
875#elif defined(TARGET_MIPS)
876 cpu_dump_state(env, logfile, fprintf, 0);
877#elif defined(TARGET_SH4)
878 cpu_dump_state(env, logfile, fprintf, 0);
879#elif defined(TARGET_ALPHA)
880 cpu_dump_state(env, logfile, fprintf, 0);
881#elif defined(TARGET_CRIS)
882 cpu_dump_state(env, logfile, fprintf, 0);
883#else
884#error unsupported target CPU
885#endif
886 }
887#endif
888 spin_lock(&tb_lock);
889 tb = tb_find_fast();
890 /* Note: we do it here to avoid a gcc bug on Mac OS X when
891 doing it in tb_find_slow */
892 if (tb_invalidated_flag) {
893 /* as some TB could have been invalidated because
894 of memory exceptions while generating the code, we
895 must recompute the hash index here */
896 next_tb = 0;
897 tb_invalidated_flag = 0;
898 }
899#ifdef DEBUG_EXEC
900 if ((loglevel & CPU_LOG_EXEC)) {
901 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
902 (long)tb->tc_ptr, tb->pc,
903 lookup_symbol(tb->pc));
904 }
905#endif
906 /* see if we can patch the calling TB. When the TB
907 spans two pages, we cannot safely do a direct
908 jump. */
909 {
910 if (next_tb != 0 &&
911#ifdef USE_KQEMU
912 (env->kqemu_enabled != 2) &&
913#endif
914 tb->page_addr[1] == -1) {
915 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
916 }
917 }
918 spin_unlock(&tb_lock);
919 env->current_tb = tb;
920 while (env->current_tb) {
921 tc_ptr = tb->tc_ptr;
922 /* execute the generated code */
923#if defined(__sparc__) && !defined(HOST_SOLARIS)
924#undef env
925 env = cpu_single_env;
926#define env cpu_single_env
927#endif
928 next_tb = tcg_qemu_tb_exec(tc_ptr);
929 env->current_tb = NULL;
930 if ((next_tb & 3) == 2) {
931 /* Instruction counter expired. */
932 int insns_left;
933 tb = (TranslationBlock *)(long)(next_tb & ~3);
934 /* Restore PC. */
935 CPU_PC_FROM_TB(env, tb);
936 insns_left = env->icount_decr.u32;
937 if (env->icount_extra && insns_left >= 0) {
938 /* Refill decrementer and continue execution. */
939 env->icount_extra += insns_left;
940 if (env->icount_extra > 0xffff) {
941 insns_left = 0xffff;
942 } else {
943 insns_left = env->icount_extra;
944 }
945 env->icount_extra -= insns_left;
946 env->icount_decr.u16.low = insns_left;
947 } else {
948 if (insns_left > 0) {
949 /* Execute remaining instructions. */
950 cpu_exec_nocache(insns_left, tb);
951 }
952 env->exception_index = EXCP_INTERRUPT;
953 next_tb = 0;
954 cpu_loop_exit();
955 }
956 }
957 }
958 /* reset soft MMU for next block (it can currently
959 only be set by a memory fault) */
960#if defined(USE_KQEMU)
961#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
962 if (kqemu_is_ok(env) &&
963 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
964 cpu_loop_exit();
965 }
966#endif
967 } /* for(;;) */
968 } else {
969 env_to_regs();
970 }
971 } /* for(;;) */
972
973
974#if defined(TARGET_I386)
975 /* restore flags in standard format */
976 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
977#elif defined(TARGET_ARM)
978 /* XXX: Save/restore host FPU exception state? */
979#elif defined(TARGET_SPARC)
980#elif defined(TARGET_PPC)
981#elif defined(TARGET_M68K)
982 cpu_m68k_flush_flags(env, env->cc_op);
983 env->cc_op = CC_OP_FLAGS;
984 env->sr = (env->sr & 0xffe0)
985 | env->cc_dest | (env->cc_x << 4);
986#elif defined(TARGET_MIPS)
987#elif defined(TARGET_SH4)
988#elif defined(TARGET_ALPHA)
989#elif defined(TARGET_CRIS)
990 /* XXXXX */
991#else
992#error unsupported target CPU
993#endif
994
995 /* restore global registers */
996#include "hostregs_helper.h"
997
998 /* fail-safe: never use cpu_single_env outside cpu_exec() */
999 cpu_single_env = NULL;
1000 return ret;
1001}
1002#endif /* !VBOX */
1003
1004/* must only be called from the generated code as an exception can be
1005 generated */
1006void tb_invalidate_page_range(target_ulong start, target_ulong end)
1007{
1008 /* XXX: cannot enable it yet because it yields to MMU exception
1009 where NIP != read address on PowerPC */
1010#if 0
1011 target_ulong phys_addr;
1012 phys_addr = get_phys_addr_code(env, start);
1013 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1014#endif
1015}
1016
1017#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1018
1019void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1020{
1021 CPUX86State *saved_env;
1022
1023 saved_env = env;
1024 env = s;
1025 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1026 selector &= 0xffff;
1027 cpu_x86_load_seg_cache(env, seg_reg, selector,
1028 (selector << 4), 0xffff, 0);
1029 } else {
1030 load_seg(seg_reg, selector);
1031 }
1032 env = saved_env;
1033}
1034
1035void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1036{
1037 CPUX86State *saved_env;
1038
1039 saved_env = env;
1040 env = s;
1041
1042 helper_fsave((target_ulong)ptr, data32);
1043
1044 env = saved_env;
1045}
1046
1047void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1048{
1049 CPUX86State *saved_env;
1050
1051 saved_env = env;
1052 env = s;
1053
1054 helper_frstor((target_ulong)ptr, data32);
1055
1056 env = saved_env;
1057}
1058
1059#endif /* TARGET_I386 */
1060
1061#if !defined(CONFIG_SOFTMMU)
1062
1063#if defined(TARGET_I386)
1064
1065/* 'pc' is the host PC at which the exception was raised. 'address' is
1066 the effective address of the memory exception. 'is_write' is 1 if a
1067 write caused the exception and 0 otherwise. 'old_set' is the
1068 signal set which should be restored */
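/* Returns 1 if the fault was handled here (write fault fixed up or a guest
   exception raised), 0 if it was not an MMU fault and must be handled by the
   caller. */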
1069static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1070 int is_write, sigset_t *old_set,
1071 void *puc)
1072{
1073 TranslationBlock *tb;
1074 int ret;
1075
1076 if (cpu_single_env)
1077 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1078#if defined(DEBUG_SIGNAL)
1079 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1080 pc, address, is_write, *(unsigned long *)old_set);
1081#endif
1082 /* XXX: locking issue */
1083 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1084 return 1;
1085 }
1086
1087 /* see if it is an MMU fault */
1088 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1089 ((env->hflags & HF_CPL_MASK) == 3), 0);
1090 if (ret < 0)
1091 return 0; /* not an MMU fault */
1092 if (ret == 0)
1093 return 1; /* the MMU fault was handled without causing real CPU fault */
1094 /* now we have a real cpu fault */
1095 tb = tb_find_pc(pc);
1096 if (tb) {
1097 /* the PC is inside the translated code. It means that we have
1098 a virtual CPU fault */
1099 cpu_restore_state(tb, env, pc, puc);
1100 }
1101 if (ret == 1) {
1102#if 0
1103 printf("PF exception: EIP=0x%VGv CR2=0x%VGv error=0x%x\n",
1104 env->eip, env->cr[2], env->error_code);
1105#endif
1106 /* we restore the process signal mask as the sigreturn should
1107 do it (XXX: use sigsetjmp) */
1108 sigprocmask(SIG_SETMASK, old_set, NULL);
1109 raise_exception_err(env->exception_index, env->error_code);
1110 } else {
1111 /* activate soft MMU for this block */
1112 env->hflags |= HF_SOFTMMU_MASK;
1113 cpu_resume_from_signal(env, puc);
1114 }
1115 /* never comes here */
1116 return 1;
1117}
1118
1119#elif defined(TARGET_ARM)
1120static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1121 int is_write, sigset_t *old_set,
1122 void *puc)
1123{
1124 TranslationBlock *tb;
1125 int ret;
1126
1127 if (cpu_single_env)
1128 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1129#if defined(DEBUG_SIGNAL)
1130 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1131 pc, address, is_write, *(unsigned long *)old_set);
1132#endif
1133 /* XXX: locking issue */
1134 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1135 return 1;
1136 }
1137 /* see if it is an MMU fault */
1138 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1139 if (ret < 0)
1140 return 0; /* not an MMU fault */
1141 if (ret == 0)
1142 return 1; /* the MMU fault was handled without causing real CPU fault */
1143 /* now we have a real cpu fault */
1144 tb = tb_find_pc(pc);
1145 if (tb) {
1146 /* the PC is inside the translated code. It means that we have
1147 a virtual CPU fault */
1148 cpu_restore_state(tb, env, pc, puc);
1149 }
1150 /* we restore the process signal mask as the sigreturn should
1151 do it (XXX: use sigsetjmp) */
1152 sigprocmask(SIG_SETMASK, old_set, NULL);
1153 cpu_loop_exit();
1154}
1155#elif defined(TARGET_SPARC)
1156static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1157 int is_write, sigset_t *old_set,
1158 void *puc)
1159{
1160 TranslationBlock *tb;
1161 int ret;
1162
1163 if (cpu_single_env)
1164 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1165#if defined(DEBUG_SIGNAL)
1166 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1167 pc, address, is_write, *(unsigned long *)old_set);
1168#endif
1169 /* XXX: locking issue */
1170 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1171 return 1;
1172 }
1173 /* see if it is an MMU fault */
1174 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1175 if (ret < 0)
1176 return 0; /* not an MMU fault */
1177 if (ret == 0)
1178 return 1; /* the MMU fault was handled without causing real CPU fault */
1179 /* now we have a real cpu fault */
1180 tb = tb_find_pc(pc);
1181 if (tb) {
1182 /* the PC is inside the translated code. It means that we have
1183 a virtual CPU fault */
1184 cpu_restore_state(tb, env, pc, puc);
1185 }
1186 /* we restore the process signal mask as the sigreturn should
1187 do it (XXX: use sigsetjmp) */
1188 sigprocmask(SIG_SETMASK, old_set, NULL);
1189 cpu_loop_exit();
1190}
1191#elif defined (TARGET_PPC)
1192static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1193 int is_write, sigset_t *old_set,
1194 void *puc)
1195{
1196 TranslationBlock *tb;
1197 int ret;
1198
1199 if (cpu_single_env)
1200 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1201#if defined(DEBUG_SIGNAL)
1202 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1203 pc, address, is_write, *(unsigned long *)old_set);
1204#endif
1205 /* XXX: locking issue */
1206 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1207 return 1;
1208 }
1209
1210 /* see if it is an MMU fault */
1211 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1212 if (ret < 0)
1213 return 0; /* not an MMU fault */
1214 if (ret == 0)
1215 return 1; /* the MMU fault was handled without causing real CPU fault */
1216
1217 /* now we have a real cpu fault */
1218 tb = tb_find_pc(pc);
1219 if (tb) {
1220 /* the PC is inside the translated code. It means that we have
1221 a virtual CPU fault */
1222 cpu_restore_state(tb, env, pc, puc);
1223 }
1224 if (ret == 1) {
1225#if 0
1226 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1227 env->nip, env->error_code, tb);
1228#endif
1229 /* we restore the process signal mask as the sigreturn should
1230 do it (XXX: use sigsetjmp) */
1231 sigprocmask(SIG_SETMASK, old_set, NULL);
1232 do_raise_exception_err(env->exception_index, env->error_code);
1233 } else {
1234 /* activate soft MMU for this block */
1235 cpu_resume_from_signal(env, puc);
1236 }
1237 /* never comes here */
1238 return 1;
1239}
1240
1241#elif defined(TARGET_M68K)
1242static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1243 int is_write, sigset_t *old_set,
1244 void *puc)
1245{
1246 TranslationBlock *tb;
1247 int ret;
1248
1249 if (cpu_single_env)
1250 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1251#if defined(DEBUG_SIGNAL)
1252 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1253 pc, address, is_write, *(unsigned long *)old_set);
1254#endif
1255 /* XXX: locking issue */
1256 if (is_write && page_unprotect(address, pc, puc)) {
1257 return 1;
1258 }
1259 /* see if it is an MMU fault */
1260 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1261 if (ret < 0)
1262 return 0; /* not an MMU fault */
1263 if (ret == 0)
1264 return 1; /* the MMU fault was handled without causing real CPU fault */
1265 /* now we have a real cpu fault */
1266 tb = tb_find_pc(pc);
1267 if (tb) {
1268 /* the PC is inside the translated code. It means that we have
1269 a virtual CPU fault */
1270 cpu_restore_state(tb, env, pc, puc);
1271 }
1272 /* we restore the process signal mask as the sigreturn should
1273 do it (XXX: use sigsetjmp) */
1274 sigprocmask(SIG_SETMASK, old_set, NULL);
1275 cpu_loop_exit();
1276 /* never comes here */
1277 return 1;
1278}
1279
1280#elif defined (TARGET_MIPS)
1281static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1282 int is_write, sigset_t *old_set,
1283 void *puc)
1284{
1285 TranslationBlock *tb;
1286 int ret;
1287
1288 if (cpu_single_env)
1289 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1290#if defined(DEBUG_SIGNAL)
1291 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1292 pc, address, is_write, *(unsigned long *)old_set);
1293#endif
1294 /* XXX: locking issue */
1295 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1296 return 1;
1297 }
1298
1299 /* see if it is an MMU fault */
1300 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1301 if (ret < 0)
1302 return 0; /* not an MMU fault */
1303 if (ret == 0)
1304 return 1; /* the MMU fault was handled without causing real CPU fault */
1305
1306 /* now we have a real cpu fault */
1307 tb = tb_find_pc(pc);
1308 if (tb) {
1309 /* the PC is inside the translated code. It means that we have
1310 a virtual CPU fault */
1311 cpu_restore_state(tb, env, pc, puc);
1312 }
1313 if (ret == 1) {
1314#if 0
1315 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1316 env->nip, env->error_code, tb);
1317#endif
1318 /* we restore the process signal mask as the sigreturn should
1319 do it (XXX: use sigsetjmp) */
1320 sigprocmask(SIG_SETMASK, old_set, NULL);
1321 do_raise_exception_err(env->exception_index, env->error_code);
1322 } else {
1323 /* activate soft MMU for this block */
1324 cpu_resume_from_signal(env, puc);
1325 }
1326 /* never comes here */
1327 return 1;
1328}
1329
1330#elif defined (TARGET_SH4)
1331static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1332 int is_write, sigset_t *old_set,
1333 void *puc)
1334{
1335 TranslationBlock *tb;
1336 int ret;
1337
1338 if (cpu_single_env)
1339 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1340#if defined(DEBUG_SIGNAL)
1341 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1342 pc, address, is_write, *(unsigned long *)old_set);
1343#endif
1344 /* XXX: locking issue */
1345 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1346 return 1;
1347 }
1348
1349 /* see if it is an MMU fault */
1350 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1351 if (ret < 0)
1352 return 0; /* not an MMU fault */
1353 if (ret == 0)
1354 return 1; /* the MMU fault was handled without causing real CPU fault */
1355
1356 /* now we have a real cpu fault */
1357 tb = tb_find_pc(pc);
1358 if (tb) {
1359 /* the PC is inside the translated code. It means that we have
1360 a virtual CPU fault */
1361 cpu_restore_state(tb, env, pc, puc);
1362 }
1363#if 0
1364 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1365 env->nip, env->error_code, tb);
1366#endif
1367 /* we restore the process signal mask as the sigreturn should
1368 do it (XXX: use sigsetjmp) */
1369 sigprocmask(SIG_SETMASK, old_set, NULL);
1370 cpu_loop_exit();
1371 /* never comes here */
1372 return 1;
1373}
1374#else
1375#error unsupported target CPU
1376#endif
1377
1378#if defined(__i386__)
1379
1380#if defined(__APPLE__)
1381# include <sys/ucontext.h>
1382
1383# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1384# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1385# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1386#else
1387# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1388# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1389# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1390#endif
1391
1392int cpu_signal_handler(int host_signum, void *pinfo,
1393 void *puc)
1394{
1395 siginfo_t *info = pinfo;
1396 struct ucontext *uc = puc;
1397 unsigned long pc;
1398 int trapno;
1399
1400#ifndef REG_EIP
1401/* for glibc 2.1 */
1402#define REG_EIP EIP
1403#define REG_ERR ERR
1404#define REG_TRAPNO TRAPNO
1405#endif
1406 pc = uc->uc_mcontext.gregs[REG_EIP];
1407 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1408#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1409 if (trapno == 0x00 || trapno == 0x05) {
1410 /* send division by zero or bound exception */
1411 cpu_send_trap(pc, trapno, uc);
1412 return 1;
1413 } else
1414#endif
1415 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1416 trapno == 0xe ?
1417 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1418 &uc->uc_sigmask, puc);
1419}
1420
1421#elif defined(__x86_64__)
1422
1423int cpu_signal_handler(int host_signum, void *pinfo,
1424 void *puc)
1425{
1426 siginfo_t *info = pinfo;
1427 struct ucontext *uc = puc;
1428 unsigned long pc;
1429
1430 pc = uc->uc_mcontext.gregs[REG_RIP];
1431 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1432 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1433 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1434 &uc->uc_sigmask, puc);
1435}
1436
1437#elif defined(__powerpc__)
1438
1439/***********************************************************************
1440 * signal context platform-specific definitions
1441 * From Wine
1442 */
1443#ifdef linux
1444/* All Registers access - only for local access */
1445# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1446/* Gpr Registers access */
1447# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1448# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1449# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1450# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1451# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1452# define LR_sig(context) REG_sig(link, context) /* Link register */
1453# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1454/* Float Registers access */
1455# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1456# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1457/* Exception Registers access */
1458# define DAR_sig(context) REG_sig(dar, context)
1459# define DSISR_sig(context) REG_sig(dsisr, context)
1460# define TRAP_sig(context) REG_sig(trap, context)
1461#endif /* linux */
1462
1463#ifdef __APPLE__
1464# include <sys/ucontext.h>
1465typedef struct ucontext SIGCONTEXT;
1466/* All Registers access - only for local access */
1467# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1468# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1469# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1470# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1471/* Gpr Registers access */
1472# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1473# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1474# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1475# define CTR_sig(context) REG_sig(ctr, context)
1476 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1477 # define LR_sig(context) REG_sig(lr, context) /* Link register */
1478# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1479/* Float Registers access */
1480# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1481# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1482/* Exception Registers access */
1483# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1484# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1485# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1486#endif /* __APPLE__ */
1487
1488int cpu_signal_handler(int host_signum, void *pinfo,
1489 void *puc)
1490{
1491 siginfo_t *info = pinfo;
1492 struct ucontext *uc = puc;
1493 unsigned long pc;
1494 int is_write;
1495
1496 pc = IAR_sig(uc);
1497 is_write = 0;
1498#if 0
1499 /* ppc 4xx case */
1500 if (DSISR_sig(uc) & 0x00800000)
1501 is_write = 1;
1502#else
1503 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1504 is_write = 1;
1505#endif
1506 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1507 is_write, &uc->uc_sigmask, puc);
1508}
1509
1510#elif defined(__alpha__)
1511
1512int cpu_signal_handler(int host_signum, void *pinfo,
1513 void *puc)
1514{
1515 siginfo_t *info = pinfo;
1516 struct ucontext *uc = puc;
1517 uint32_t *pc = uc->uc_mcontext.sc_pc;
1518 uint32_t insn = *pc;
1519 int is_write = 0;
1520
1521 /* XXX: need kernel patch to get write flag faster */
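/* The Alpha opcode sits in bits 26-31; the fault is a write if it is one of
   the store opcodes below. */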
1522 switch (insn >> 26) {
1523 case 0x0d: // stw
1524 case 0x0e: // stb
1525 case 0x0f: // stq_u
1526 case 0x24: // stf
1527 case 0x25: // stg
1528 case 0x26: // sts
1529 case 0x27: // stt
1530 case 0x2c: // stl
1531 case 0x2d: // stq
1532 case 0x2e: // stl_c
1533 case 0x2f: // stq_c
1534 is_write = 1;
1535 }
1536
1537 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1538 is_write, &uc->uc_sigmask, puc);
1539}
1540#elif defined(__sparc__)
1541
1542int cpu_signal_handler(int host_signum, void *pinfo,
1543 void *puc)
1544{
1545 siginfo_t *info = pinfo;
1546 uint32_t *regs = (uint32_t *)(info + 1);
1547 void *sigmask = (regs + 20);
1548 unsigned long pc;
1549 int is_write;
1550 uint32_t insn;
1551
1552 /* XXX: is there a standard glibc define ? */
1553 pc = regs[1];
1554 /* XXX: need kernel patch to get write flag faster */
1555 is_write = 0;
1556 insn = *(uint32_t *)pc;
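/* op (bits 30-31) == 3 selects the load/store format; the op3 field
   (bits 19-24) distinguishes the store opcodes checked below. */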
1557 if ((insn >> 30) == 3) {
1558 switch((insn >> 19) & 0x3f) {
1559 case 0x05: // stb
1560 case 0x06: // sth
1561 case 0x04: // st
1562 case 0x07: // std
1563 case 0x24: // stf
1564 case 0x27: // stdf
1565 case 0x25: // stfsr
1566 is_write = 1;
1567 break;
1568 }
1569 }
1570 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1571 is_write, sigmask, NULL);
1572}
1573
1574#elif defined(__arm__)
1575
1576int cpu_signal_handler(int host_signum, void *pinfo,
1577 void *puc)
1578{
1579 siginfo_t *info = pinfo;
1580 struct ucontext *uc = puc;
1581 unsigned long pc;
1582 int is_write;
1583
1584 pc = uc->uc_mcontext.gregs[R15];
1585 /* XXX: compute is_write */
1586 is_write = 0;
1587 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1588 is_write,
1589 &uc->uc_sigmask, puc);
1590}
1591
1592#elif defined(__mc68000)
1593
1594int cpu_signal_handler(int host_signum, void *pinfo,
1595 void *puc)
1596{
1597 siginfo_t *info = pinfo;
1598 struct ucontext *uc = puc;
1599 unsigned long pc;
1600 int is_write;
1601
1602 pc = uc->uc_mcontext.gregs[16];
1603 /* XXX: compute is_write */
1604 is_write = 0;
1605 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1606 is_write,
1607 &uc->uc_sigmask, puc);
1608}
1609
1610#elif defined(__ia64)
1611
1612#ifndef __ISR_VALID
1613 /* This ought to be in <bits/siginfo.h>... */
1614# define __ISR_VALID 1
1615#endif
1616
1617int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1618{
1619 siginfo_t *info = pinfo;
1620 struct ucontext *uc = puc;
1621 unsigned long ip;
1622 int is_write = 0;
1623
1624 ip = uc->uc_mcontext.sc_ip;
1625 switch (host_signum) {
1626 case SIGILL:
1627 case SIGFPE:
1628 case SIGSEGV:
1629 case SIGBUS:
1630 case SIGTRAP:
1631 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1632 /* ISR.W (write-access) is bit 33: */
1633 is_write = (info->si_isr >> 33) & 1;
1634 break;
1635
1636 default:
1637 break;
1638 }
1639 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1640 is_write,
1641 &uc->uc_sigmask, puc);
1642}
1643
1644#elif defined(__s390__)
1645
1646int cpu_signal_handler(int host_signum, void *pinfo,
1647 void *puc)
1648{
1649 siginfo_t *info = pinfo;
1650 struct ucontext *uc = puc;
1651 unsigned long pc;
1652 int is_write;
1653
1654 pc = uc->uc_mcontext.psw.addr;
1655 /* XXX: compute is_write */
1656 is_write = 0;
1657 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1658 is_write,
1659 &uc->uc_sigmask, puc);
1660}
1661
1662#else
1663
1664#error host CPU specific signal handler needed
1665
1666#endif
1667
1668#endif /* !defined(CONFIG_SOFTMMU) */