VirtualBox

source: vbox/trunk/src/recompiler_new/cpu-exec.c @ 13458

Last change on this file since 13458 was 13440, checked in by vboxsync, 16 years ago

further MSVC stuff, almost there

  • Property svn:eol-style set to native
File size: 60.2 KB
 
1/*
2 * i386 emulator main execution loop
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/*
22 * Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
23 * other than GPL or LGPL is available it will apply instead, Sun elects to use only
24 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
25 * a choice of LGPL license versions is made available with the language indicating
26 * that LGPLv2 or any later version may be used, or where a choice of which version
27 * of the LGPL is applied is otherwise unspecified.
28 */
29#include "config.h"
30#define CPU_NO_GLOBAL_REGS
31#include "exec.h"
32#include "disas.h"
33#include "tcg.h"
34
35#if !defined(CONFIG_SOFTMMU)
36#undef EAX
37#undef ECX
38#undef EDX
39#undef EBX
40#undef ESP
41#undef EBP
42#undef ESI
43#undef EDI
44#undef EIP
45#include <signal.h>
46#include <sys/ucontext.h>
47#endif
48
49#if defined(__sparc__) && !defined(HOST_SOLARIS)
50// Work around ugly bugs in glibc that mangle global register contents
51#undef env
52#define env cpu_single_env
53#endif
54
55int tb_invalidated_flag;
56
57//#define DEBUG_EXEC
58//#define DEBUG_SIGNAL
59
60
61void cpu_loop_exit(void)
62{
63 /* NOTE: the registers at this point must be saved by hand because
64 longjmp restores them */
65 regs_to_env();
66 longjmp(env->jmp_env, 1);
67}
68
69#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
70#define reg_T2
71#endif
72
73/* exit the current TB from a signal handler. The host registers are
74 restored in a state compatible with the CPU emulator
75 */
76void cpu_resume_from_signal(CPUState *env1, void *puc)
77{
78#if !defined(CONFIG_SOFTMMU)
79 struct ucontext *uc = puc;
80#endif
81
82 env = env1;
83
84 /* XXX: restore cpu registers saved in host registers */
85
86#if !defined(CONFIG_SOFTMMU)
87 if (puc) {
88 /* XXX: use siglongjmp ? */
89 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
90 }
91#endif
92 longjmp(env->jmp_env, 1);
93}
94
95/* Execute the code without caching the generated code. An interpreter
96 could be used if available. */
97static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
98{
99 unsigned long next_tb;
100 TranslationBlock *tb;
101
102 /* Should never happen.
103 We only end up here when an existing TB is too long. */
104 if (max_cycles > CF_COUNT_MASK)
105 max_cycles = CF_COUNT_MASK;
106
107 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
108 max_cycles);
109 env->current_tb = tb;
110 /* execute the generated code */
111 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
112
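    /* A note on the value returned by tcg_qemu_tb_exec() (inferred from its uses
       here and in cpu_exec below): it is the address of the TB that was current
       when execution returned to us, with the low two bits used as a tag. Values
       0 and 1 select the jump slot to patch for direct chaining, while 2 means
       the block was left because the instruction counter expired. */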
113 if ((next_tb & 3) == 2) {
114 /* Restore PC. This may happen if an async event occurs before
115 the TB starts executing. */
116 CPU_PC_FROM_TB(env, tb);
117 }
118 tb_phys_invalidate(tb, -1);
119 tb_free(tb);
120}
121
122static TranslationBlock *tb_find_slow(target_ulong pc,
123 target_ulong cs_base,
124 uint64_t flags)
125{
126 TranslationBlock *tb, **ptb1;
127 unsigned int h;
128 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
129
130 tb_invalidated_flag = 0;
131
132 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
133
134 /* find translated block using physical mappings */
135 phys_pc = get_phys_addr_code(env, pc);
136 phys_page1 = phys_pc & TARGET_PAGE_MASK;
137 phys_page2 = -1;
138 h = tb_phys_hash_func(phys_pc);
139 ptb1 = &tb_phys_hash[h];
140 for(;;) {
141 tb = *ptb1;
142 if (!tb)
143 goto not_found;
144 if (tb->pc == pc &&
145 tb->page_addr[0] == phys_page1 &&
146 tb->cs_base == cs_base &&
147 tb->flags == flags) {
148 /* check next page if needed */
149 if (tb->page_addr[1] != -1) {
150 virt_page2 = (pc & TARGET_PAGE_MASK) +
151 TARGET_PAGE_SIZE;
152 phys_page2 = get_phys_addr_code(env, virt_page2);
153 if (tb->page_addr[1] == phys_page2)
154 goto found;
155 } else {
156 goto found;
157 }
158 }
159 ptb1 = &tb->phys_hash_next;
160 }
161 not_found:
162 /* if no translated code available, then translate it now */
163 tb = tb_gen_code(env, pc, cs_base, flags, 0);
164
165 found:
166 /* we add the TB in the virtual pc hash table */
167 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
168 return tb;
169}
170
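/* Two-level TB lookup: tb_find_fast() first consults env->tb_jmp_cache, a small
   direct-mapped cache indexed by a hash of the virtual PC. Only on a miss does it
   fall back to tb_find_slow(), which searches the physical-address hash chain and,
   failing that, generates a fresh translation with tb_gen_code(). */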
171#ifndef VBOX
172static inline TranslationBlock *tb_find_fast(void)
173#else
174DECLINLINE(TranslationBlock *) tb_find_fast(void)
175#endif
176{
177 TranslationBlock *tb;
178 target_ulong cs_base, pc;
179 uint64_t flags;
180
181 /* we record a subset of the CPU state. It will
182 always be the same before a given translated block
183 is executed. */
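    /* Put differently, the (pc, cs_base, flags) triple must identify a translation
       uniquely: any other piece of CPU state that influences code generation has
       to be folded into 'flags'. */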
184#if defined(TARGET_I386)
185 flags = env->hflags;
186 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
187 cs_base = env->segs[R_CS].base;
188 pc = cs_base + env->eip;
189#elif defined(TARGET_ARM)
190 flags = env->thumb | (env->vfp.vec_len << 1)
191 | (env->vfp.vec_stride << 4);
192 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
193 flags |= (1 << 6);
194 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
195 flags |= (1 << 7);
196 flags |= (env->condexec_bits << 8);
197 cs_base = 0;
198 pc = env->regs[15];
199#elif defined(TARGET_SPARC)
200#ifdef TARGET_SPARC64
201 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
202 flags = ((env->pstate & PS_AM) << 2)
203 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
204 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
205#else
206 // FPU enable . Supervisor
207 flags = (env->psref << 4) | env->psrs;
208#endif
209 cs_base = env->npc;
210 pc = env->pc;
211#elif defined(TARGET_PPC)
212 flags = env->hflags;
213 cs_base = 0;
214 pc = env->nip;
215#elif defined(TARGET_MIPS)
216 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
217 cs_base = 0;
218 pc = env->active_tc.PC;
219#elif defined(TARGET_M68K)
220 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
221 | (env->sr & SR_S) /* Bit 13 */
222 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
223 cs_base = 0;
224 pc = env->pc;
225#elif defined(TARGET_SH4)
226 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
227 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
228 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
229 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
230 cs_base = 0;
231 pc = env->pc;
232#elif defined(TARGET_ALPHA)
233 flags = env->ps;
234 cs_base = 0;
235 pc = env->pc;
236#elif defined(TARGET_CRIS)
237 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
238 flags |= env->dslot;
239 cs_base = 0;
240 pc = env->pc;
241#else
242#error unsupported CPU
243#endif
244 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
245 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
246 tb->flags != flags)) {
247 tb = tb_find_slow(pc, cs_base, flags);
248 }
249 return tb;
250}
251
252/* main execution loop */
253
254#ifdef VBOX
255
256int cpu_exec(CPUState *env1)
257{
258#define DECLARE_HOST_REGS 1
259#include "hostregs_helper.h"
260 int ret, interrupt_request;
261 TranslationBlock *tb;
262 uint8_t *tc_ptr;
263 unsigned long next_tb;
264
265 if (cpu_halted(env1) == EXCP_HALTED)
266 return EXCP_HALTED;
267
268 cpu_single_env = env1;
269
270 /* first we save global registers */
271#define SAVE_HOST_REGS 1
272#include "hostregs_helper.h"
273 env = env1;
274
275 env_to_regs();
276#if defined(TARGET_I386)
277 /* put eflags in CPU temporary format */
278 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
279 DF = 1 - (2 * ((env->eflags >> 10) & 1));
280 CC_OP = CC_OP_EFLAGS;
281 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
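    /* Lazy flags, in brief: CC_SRC and CC_OP let most of EFLAGS be recomputed on
       demand via cc_table[CC_OP].compute_all() rather than after every
       instruction, and DF is kept as +1/-1 so string operations can add it
       directly to their index registers. */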
282#elif defined(TARGET_SPARC)
283#elif defined(TARGET_M68K)
284 env->cc_op = CC_OP_FLAGS;
285 env->cc_dest = env->sr & 0xf;
286 env->cc_x = (env->sr >> 4) & 1;
287#elif defined(TARGET_ALPHA)
288#elif defined(TARGET_ARM)
289#elif defined(TARGET_PPC)
290#elif defined(TARGET_MIPS)
291#elif defined(TARGET_SH4)
292#elif defined(TARGET_CRIS)
293 /* XXXXX */
294#else
295#error unsupported target CPU
296#endif
297#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
298 env->exception_index = -1;
299#endif
300
301 /* prepare setjmp context for exception handling */
302 for(;;) {
303 if (setjmp(env->jmp_env) == 0)
304 {
305 env->current_tb = NULL;
306 VMMR3Unlock(env->pVM);
307 VMMR3Lock(env->pVM);
308
309 /*
310 * Check for fatal errors first
311 */
312 if (env->interrupt_request & CPU_INTERRUPT_RC) {
313 env->exception_index = EXCP_RC;
314 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
315 ret = env->exception_index;
316 cpu_loop_exit();
317 }
318
319 /* if an exception is pending, we execute it here */
320 if (env->exception_index >= 0) {
321 Assert(!env->user_mode_only);
322 if (env->exception_index >= EXCP_INTERRUPT) {
323 /* exit request from the cpu execution loop */
324 ret = env->exception_index;
325 break;
326 } else {
327 /* simulate a real cpu exception. On i386, it can
328 trigger new exceptions, but we do not handle
329 double or triple faults yet. */
330 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
331 Log(("do_interrupt %d %d %VGv\n", env->exception_index, env->exception_is_int, env->exception_next_eip));
332 do_interrupt(env->exception_index,
333 env->exception_is_int,
334 env->error_code,
335 env->exception_next_eip, 0);
336 /* successfully delivered */
337 env->old_exception = -1;
338 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
339 }
340 env->exception_index = -1;
341 }
342
343 next_tb = 0; /* force lookup of first TB */
344 for(;;)
345 {
346 interrupt_request = env->interrupt_request;
347#ifndef VBOX
348 if (__builtin_expect(interrupt_request, 0))
349#else
350 if (RT_UNLIKELY(interrupt_request != 0))
351#endif
352 {
353 /** @todo: reconcile with what QEMU really does */
354
355 /* Single instruction exec request, we execute it and return (one way or the other).
356 The caller will always reschedule after doing this operation! */
357 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
358 {
359 /* not in flight are we? (if we are, we trapped) */
360 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
361 {
362 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
363 env->exception_index = EXCP_SINGLE_INSTR;
364 if (emulate_single_instr(env) == -1)
365 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%VGv!!\n", env->eip));
366
367 /* When we receive an external interrupt during execution of this single
368 instruction, we should stay here. We will leave when we're ready
369 for raw-mode or when interrupted by pending EMT requests. */
370 interrupt_request = env->interrupt_request; /* reload this! */
371 if ( !(interrupt_request & CPU_INTERRUPT_HARD)
372 || !(env->eflags & IF_MASK)
373 || (env->hflags & HF_INHIBIT_IRQ_MASK)
374 || (env->state & CPU_RAW_HWACC)
375 )
376 {
377 env->exception_index = ret = EXCP_SINGLE_INSTR;
378 cpu_loop_exit();
379 }
380 }
381 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
382 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
383 }
384
385 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
386 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
387 !(env->hflags & HF_SMM_MASK)) {
388 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
389 do_smm_enter();
390 next_tb = 0;
391 }
392 else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
393 (env->eflags & IF_MASK) &&
394 !(env->hflags & HF_INHIBIT_IRQ_MASK))
395 {
396 /* if hardware interrupt pending, we execute it */
397 int intno;
398 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
399 intno = cpu_get_pic_interrupt(env);
400 if (intno >= 0)
401 {
402 Log(("do_interrupt %d\n", intno));
403 do_interrupt(intno, 0, 0, 0, 1);
404 }
405 /* ensure that no TB jump will be modified as
406 the program flow was changed */
407 next_tb = 0;
408 }
409 if (env->interrupt_request & CPU_INTERRUPT_EXITTB)
410 {
411 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
412 /* ensure that no TB jump will be modified as
413 the program flow was changed */
414 next_tb = 0;
415 }
416 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
417 if (interrupt_request & CPU_INTERRUPT_EXIT)
418 {
419 env->exception_index = EXCP_INTERRUPT;
420 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXIT);
421 ret = env->exception_index;
422 cpu_loop_exit();
423 }
424 if (interrupt_request & CPU_INTERRUPT_RC)
425 {
426 env->exception_index = EXCP_RC;
427 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
428 ret = env->exception_index;
429 cpu_loop_exit();
430 }
431 }
432
433 /*
434 * Check whether the CPU state allows us to execute the code in raw-mode.
435 */
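            /* remR3CanExecuteRaw() is VirtualBox's check for whether the guest can
               be handed back to raw-mode (or hardware-assisted) execution; when it
               says yes, we leave the recompiler loop with the exception index it
               filled in. */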
436 RAWEx_ProfileStart(env, STATS_RAW_CHECK);
437 if (remR3CanExecuteRaw(env,
438 env->eip + env->segs[R_CS].base,
439 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
440 &env->exception_index))
441 {
442 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
443 ret = env->exception_index;
444 cpu_loop_exit();
445 }
446 RAWEx_ProfileStop(env, STATS_RAW_CHECK);
447
448 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
449 spin_lock(&tb_lock);
450 tb = tb_find_fast();
451 /* Note: we do it here to avoid a gcc bug on Mac OS X when
452 doing it in tb_find_slow */
453 if (tb_invalidated_flag) {
454 /* as some TB could have been invalidated because
455 of memory exceptions while generating the code, we
456 must recompute the hash index here */
457 next_tb = 0;
458 tb_invalidated_flag = 0;
459 }
460
461 /* see if we can patch the calling TB. When the TB
462 spans two pages, we cannot safely do a direct
463 jump. */
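            /* Direct chaining: tb_add_jump() patches jump slot (next_tb & 3) of the
               previously executed TB so its generated code branches straight to
               this one, avoiding a round trip through this loop. Blocks spanning
               two pages are skipped, presumably because such a patch could not be
               undone safely when either page is invalidated. */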
464 if (next_tb != 0
465 && !(tb->cflags & CF_RAW_MODE)
466 && tb->page_addr[1] == -1)
467 {
468 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb);
469 }
470 spin_unlock(&tb_lock);
471 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
472
473 env->current_tb = tb;
474 while (env->current_tb) {
475 tc_ptr = tb->tc_ptr;
476 /* execute the generated code */
477 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
478 next_tb = tcg_qemu_tb_exec(tc_ptr);
479 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
480 env->current_tb = NULL;
481 if ((next_tb & 3) == 2) {
482 /* Instruction counter expired. */
483 int insns_left;
484 tb = (TranslationBlock *)(long)(next_tb & ~3);
485 /* Restore PC. */
486 CPU_PC_FROM_TB(env, tb);
487 insns_left = env->icount_decr.u32;
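                    /* icount bookkeeping: the generated code decrements
                       icount_decr.u16.low once per instruction and exits with tag 2
                       when it runs out; icount_extra holds the part of the budget
                       that did not fit into that 16-bit counter and is used below
                       to refill it. */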
488 if (env->icount_extra && insns_left >= 0) {
489 /* Refill decrementer and continue execution. */
490 env->icount_extra += insns_left;
491 if (env->icount_extra > 0xffff) {
492 insns_left = 0xffff;
493 } else {
494 insns_left = env->icount_extra;
495 }
496 env->icount_extra -= insns_left;
497 env->icount_decr.u16.low = insns_left;
498 } else {
499 if (insns_left > 0) {
500 /* Execute remaining instructions. */
501 cpu_exec_nocache(insns_left, tb);
502 }
503 env->exception_index = EXCP_INTERRUPT;
504 next_tb = 0;
505 cpu_loop_exit();
506 }
507 }
508 }
509
510 /* reset soft MMU for next block (it can currently
511 only be set by a memory fault) */
512#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
513 if (env->hflags & HF_SOFTMMU_MASK) {
514 env->hflags &= ~HF_SOFTMMU_MASK;
515 /* do not allow linking to another block */
516 next_tb = 0;
517 }
518#endif
519 } /* for(;;) */
520 } else {
521 env_to_regs();
522 }
523#ifdef VBOX_HIGH_RES_TIMERS_HACK
524 /* NULL the current_tb here so cpu_interrupt() doesn't do
525 anything unnecessary (like crashing during single-instruction emulation). */
526 env->current_tb = NULL;
527 TMTimerPoll(env1->pVM);
528#endif
529 } /* for(;;) */
530
531#if defined(TARGET_I386)
532 /* restore flags in standard format */
533 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
534#else
535#error unsupported target CPU
536#endif
537#include "hostregs_helper.h"
538 return ret;
539}
540
541#else /* !VBOX */
542int cpu_exec(CPUState *env1)
543{
544#define DECLARE_HOST_REGS 1
545#include "hostregs_helper.h"
546 int ret, interrupt_request;
547 TranslationBlock *tb;
548 uint8_t *tc_ptr;
549 unsigned long next_tb;
550
551 if (cpu_halted(env1) == EXCP_HALTED)
552 return EXCP_HALTED;
553
554 cpu_single_env = env1;
555
556 /* first we save global registers */
557#define SAVE_HOST_REGS 1
558#include "hostregs_helper.h"
559 env = env1;
560
561 env_to_regs();
562#if defined(TARGET_I386)
563 /* put eflags in CPU temporary format */
564 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
565 DF = 1 - (2 * ((env->eflags >> 10) & 1));
566 CC_OP = CC_OP_EFLAGS;
567 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
568#elif defined(TARGET_SPARC)
569#elif defined(TARGET_M68K)
570 env->cc_op = CC_OP_FLAGS;
571 env->cc_dest = env->sr & 0xf;
572 env->cc_x = (env->sr >> 4) & 1;
573#elif defined(TARGET_ALPHA)
574#elif defined(TARGET_ARM)
575#elif defined(TARGET_PPC)
576#elif defined(TARGET_MIPS)
577#elif defined(TARGET_SH4)
578#elif defined(TARGET_CRIS)
579 /* XXXXX */
580#else
581#error unsupported target CPU
582#endif
583 env->exception_index = -1;
584
585 /* prepare setjmp context for exception handling */
586 for(;;) {
587 if (setjmp(env->jmp_env) == 0) {
588 env->current_tb = NULL;
589 /* if an exception is pending, we execute it here */
590 if (env->exception_index >= 0) {
591 if (env->exception_index >= EXCP_INTERRUPT) {
592 /* exit request from the cpu execution loop */
593 ret = env->exception_index;
594 break;
595 } else if (env->user_mode_only) {
596 /* if user mode only, we simulate a fake exception
597 which will be handled outside the cpu execution
598 loop */
599#if defined(TARGET_I386)
600 do_interrupt_user(env->exception_index,
601 env->exception_is_int,
602 env->error_code,
603 env->exception_next_eip);
604 /* successfully delivered */
605 env->old_exception = -1;
606#endif
607 ret = env->exception_index;
608 break;
609 } else {
610#if defined(TARGET_I386)
611 /* simulate a real cpu exception. On i386, it can
612 trigger new exceptions, but we do not handle
613 double or triple faults yet. */
614 do_interrupt(env->exception_index,
615 env->exception_is_int,
616 env->error_code,
617 env->exception_next_eip, 0);
618 /* successfully delivered */
619 env->old_exception = -1;
620#elif defined(TARGET_PPC)
621 do_interrupt(env);
622#elif defined(TARGET_MIPS)
623 do_interrupt(env);
624#elif defined(TARGET_SPARC)
625 do_interrupt(env);
626#elif defined(TARGET_ARM)
627 do_interrupt(env);
628#elif defined(TARGET_SH4)
629 do_interrupt(env);
630#elif defined(TARGET_ALPHA)
631 do_interrupt(env);
632#elif defined(TARGET_CRIS)
633 do_interrupt(env);
634#elif defined(TARGET_M68K)
635 do_interrupt(0);
636#endif
637 }
638 env->exception_index = -1;
639 }
640#ifdef USE_KQEMU
641 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
642 int ret;
643 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
644 ret = kqemu_cpu_exec(env);
645 /* put eflags in CPU temporary format */
646 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
647 DF = 1 - (2 * ((env->eflags >> 10) & 1));
648 CC_OP = CC_OP_EFLAGS;
649 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
650 if (ret == 1) {
651 /* exception */
652 longjmp(env->jmp_env, 1);
653 } else if (ret == 2) {
654 /* softmmu execution needed */
655 } else {
656 if (env->interrupt_request != 0) {
657 /* hardware interrupt will be executed just after */
658 } else {
659 /* otherwise, we restart */
660 longjmp(env->jmp_env, 1);
661 }
662 }
663 }
664#endif
665
666 next_tb = 0; /* force lookup of first TB */
667 for(;;) {
668 interrupt_request = env->interrupt_request;
669 if (unlikely(interrupt_request) &&
670 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
671 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
672 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
673 env->exception_index = EXCP_DEBUG;
674 cpu_loop_exit();
675 }
676#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
677 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
678 if (interrupt_request & CPU_INTERRUPT_HALT) {
679 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
680 env->halted = 1;
681 env->exception_index = EXCP_HLT;
682 cpu_loop_exit();
683 }
684#endif
685#if defined(TARGET_I386)
686 if (env->hflags2 & HF2_GIF_MASK) {
687 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
688 !(env->hflags & HF_SMM_MASK)) {
689 svm_check_intercept(SVM_EXIT_SMI);
690 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
691 do_smm_enter();
692 next_tb = 0;
693 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
694 !(env->hflags2 & HF2_NMI_MASK)) {
695 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
696 env->hflags2 |= HF2_NMI_MASK;
697 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
698 next_tb = 0;
699 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
700 (((env->hflags2 & HF2_VINTR_MASK) &&
701 (env->hflags2 & HF2_HIF_MASK)) ||
702 (!(env->hflags2 & HF2_VINTR_MASK) &&
703 (env->eflags & IF_MASK &&
704 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
705 int intno;
706 svm_check_intercept(SVM_EXIT_INTR);
707 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
708 intno = cpu_get_pic_interrupt(env);
709 if (loglevel & CPU_LOG_TB_IN_ASM) {
710 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
711 }
712 do_interrupt(intno, 0, 0, 0, 1);
713 /* ensure that no TB jump will be modified as
714 the program flow was changed */
715 next_tb = 0;
716#if !defined(CONFIG_USER_ONLY)
717 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
718 (env->eflags & IF_MASK) &&
719 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
720 int intno;
721 /* FIXME: this should respect TPR */
722 svm_check_intercept(SVM_EXIT_VINTR);
723 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
724 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
725 if (loglevel & CPU_LOG_TB_IN_ASM)
726 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
727 do_interrupt(intno, 0, 0, 0, 1);
728 next_tb = 0;
729#endif
730 }
731 }
732#elif defined(TARGET_PPC)
733#if 0
734 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
735 cpu_ppc_reset(env);
736 }
737#endif
738 if (interrupt_request & CPU_INTERRUPT_HARD) {
739 ppc_hw_interrupt(env);
740 if (env->pending_interrupts == 0)
741 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
742 next_tb = 0;
743 }
744#elif defined(TARGET_MIPS)
745 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
746 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
747 (env->CP0_Status & (1 << CP0St_IE)) &&
748 !(env->CP0_Status & (1 << CP0St_EXL)) &&
749 !(env->CP0_Status & (1 << CP0St_ERL)) &&
750 !(env->hflags & MIPS_HFLAG_DM)) {
751 /* Raise it */
752 env->exception_index = EXCP_EXT_INTERRUPT;
753 env->error_code = 0;
754 do_interrupt(env);
755 next_tb = 0;
756 }
757#elif defined(TARGET_SPARC)
758 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
759 (env->psret != 0)) {
760 int pil = env->interrupt_index & 15;
761 int type = env->interrupt_index & 0xf0;
762
763 if (((type == TT_EXTINT) &&
764 (pil == 15 || pil > env->psrpil)) ||
765 type != TT_EXTINT) {
766 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
767 env->exception_index = env->interrupt_index;
768 do_interrupt(env);
769 env->interrupt_index = 0;
770#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
771 cpu_check_irqs(env);
772#endif
773 next_tb = 0;
774 }
775 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
776 //do_interrupt(0, 0, 0, 0, 0);
777 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
778 }
779#elif defined(TARGET_ARM)
780 if (interrupt_request & CPU_INTERRUPT_FIQ
781 && !(env->uncached_cpsr & CPSR_F)) {
782 env->exception_index = EXCP_FIQ;
783 do_interrupt(env);
784 next_tb = 0;
785 }
786 /* ARMv7-M interrupt return works by loading a magic value
787 into the PC. On real hardware the load causes the
788 return to occur. The qemu implementation performs the
789 jump normally, then does the exception return when the
790 CPU tries to execute code at the magic address.
791 This will cause the magic PC value to be pushed to
792 the stack if an interrupt occurred at the wrong time.
793 We avoid this by disabling interrupts when
794 pc contains a magic address. */
795 if (interrupt_request & CPU_INTERRUPT_HARD
796 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
797 || !(env->uncached_cpsr & CPSR_I))) {
798 env->exception_index = EXCP_IRQ;
799 do_interrupt(env);
800 next_tb = 0;
801 }
802#elif defined(TARGET_SH4)
803 if (interrupt_request & CPU_INTERRUPT_HARD) {
804 do_interrupt(env);
805 next_tb = 0;
806 }
807#elif defined(TARGET_ALPHA)
808 if (interrupt_request & CPU_INTERRUPT_HARD) {
809 do_interrupt(env);
810 next_tb = 0;
811 }
812#elif defined(TARGET_CRIS)
813 if (interrupt_request & CPU_INTERRUPT_HARD
814 && (env->pregs[PR_CCS] & I_FLAG)) {
815 env->exception_index = EXCP_IRQ;
816 do_interrupt(env);
817 next_tb = 0;
818 }
819 if (interrupt_request & CPU_INTERRUPT_NMI
820 && (env->pregs[PR_CCS] & M_FLAG)) {
821 env->exception_index = EXCP_NMI;
822 do_interrupt(env);
823 next_tb = 0;
824 }
825#elif defined(TARGET_M68K)
826 if (interrupt_request & CPU_INTERRUPT_HARD
827 && ((env->sr & SR_I) >> SR_I_SHIFT)
828 < env->pending_level) {
829 /* Real hardware gets the interrupt vector via an
830 IACK cycle at this point. Current emulated
831 hardware doesn't rely on this, so we
832 provide/save the vector when the interrupt is
833 first signalled. */
834 env->exception_index = env->pending_vector;
835 do_interrupt(1);
836 next_tb = 0;
837 }
838#endif
839 /* Don't use the cached interrupt_request value,
840 do_interrupt may have updated the EXITTB flag. */
841 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
842 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
843 /* ensure that no TB jump will be modified as
844 the program flow was changed */
845 next_tb = 0;
846 }
847 if (interrupt_request & CPU_INTERRUPT_EXIT) {
848 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
849 env->exception_index = EXCP_INTERRUPT;
850 cpu_loop_exit();
851 }
852 }
853#ifdef DEBUG_EXEC
854 if ((loglevel & CPU_LOG_TB_CPU)) {
855 /* restore flags in standard format */
856 regs_to_env();
857#if defined(TARGET_I386)
858 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
859 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
860 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
861#elif defined(TARGET_ARM)
862 cpu_dump_state(env, logfile, fprintf, 0);
863#elif defined(TARGET_SPARC)
864 cpu_dump_state(env, logfile, fprintf, 0);
865#elif defined(TARGET_PPC)
866 cpu_dump_state(env, logfile, fprintf, 0);
867#elif defined(TARGET_M68K)
868 cpu_m68k_flush_flags(env, env->cc_op);
869 env->cc_op = CC_OP_FLAGS;
870 env->sr = (env->sr & 0xffe0)
871 | env->cc_dest | (env->cc_x << 4);
872 cpu_dump_state(env, logfile, fprintf, 0);
873#elif defined(TARGET_MIPS)
874 cpu_dump_state(env, logfile, fprintf, 0);
875#elif defined(TARGET_SH4)
876 cpu_dump_state(env, logfile, fprintf, 0);
877#elif defined(TARGET_ALPHA)
878 cpu_dump_state(env, logfile, fprintf, 0);
879#elif defined(TARGET_CRIS)
880 cpu_dump_state(env, logfile, fprintf, 0);
881#else
882#error unsupported target CPU
883#endif
884 }
885#endif
886 spin_lock(&tb_lock);
887 tb = tb_find_fast();
888 /* Note: we do it here to avoid a gcc bug on Mac OS X when
889 doing it in tb_find_slow */
890 if (tb_invalidated_flag) {
891 /* as some TB could have been invalidated because
892 of memory exceptions while generating the code, we
893 must recompute the hash index here */
894 next_tb = 0;
895 tb_invalidated_flag = 0;
896 }
897#ifdef DEBUG_EXEC
898 if ((loglevel & CPU_LOG_EXEC)) {
899 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
900 (long)tb->tc_ptr, tb->pc,
901 lookup_symbol(tb->pc));
902 }
903#endif
904 /* see if we can patch the calling TB. When the TB
905 spans two pages, we cannot safely do a direct
906 jump. */
907 {
908 if (next_tb != 0 &&
909#ifdef USE_KQEMU
910 (env->kqemu_enabled != 2) &&
911#endif
912 tb->page_addr[1] == -1) {
913 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
914 }
915 }
916 spin_unlock(&tb_lock);
917 env->current_tb = tb;
918 while (env->current_tb) {
919 tc_ptr = tb->tc_ptr;
920 /* execute the generated code */
921#if defined(__sparc__) && !defined(HOST_SOLARIS)
922#undef env
923 env = cpu_single_env;
924#define env cpu_single_env
925#endif
926 next_tb = tcg_qemu_tb_exec(tc_ptr);
927 env->current_tb = NULL;
928 if ((next_tb & 3) == 2) {
929 /* Instruction counter expired. */
930 int insns_left;
931 tb = (TranslationBlock *)(long)(next_tb & ~3);
932 /* Restore PC. */
933 CPU_PC_FROM_TB(env, tb);
934 insns_left = env->icount_decr.u32;
935 if (env->icount_extra && insns_left >= 0) {
936 /* Refill decrementer and continue execution. */
937 env->icount_extra += insns_left;
938 if (env->icount_extra > 0xffff) {
939 insns_left = 0xffff;
940 } else {
941 insns_left = env->icount_extra;
942 }
943 env->icount_extra -= insns_left;
944 env->icount_decr.u16.low = insns_left;
945 } else {
946 if (insns_left > 0) {
947 /* Execute remaining instructions. */
948 cpu_exec_nocache(insns_left, tb);
949 }
950 env->exception_index = EXCP_INTERRUPT;
951 next_tb = 0;
952 cpu_loop_exit();
953 }
954 }
955 }
956 /* reset soft MMU for next block (it can currently
957 only be set by a memory fault) */
958#if defined(USE_KQEMU)
959#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
960 if (kqemu_is_ok(env) &&
961 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
962 cpu_loop_exit();
963 }
964#endif
965 } /* for(;;) */
966 } else {
967 env_to_regs();
968 }
969 } /* for(;;) */
970
971
972#if defined(TARGET_I386)
973 /* restore flags in standard format */
974 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
975#elif defined(TARGET_ARM)
976 /* XXX: Save/restore host fpu exception state? */
977#elif defined(TARGET_SPARC)
978#elif defined(TARGET_PPC)
979#elif defined(TARGET_M68K)
980 cpu_m68k_flush_flags(env, env->cc_op);
981 env->cc_op = CC_OP_FLAGS;
982 env->sr = (env->sr & 0xffe0)
983 | env->cc_dest | (env->cc_x << 4);
984#elif defined(TARGET_MIPS)
985#elif defined(TARGET_SH4)
986#elif defined(TARGET_ALPHA)
987#elif defined(TARGET_CRIS)
988 /* XXXXX */
989#else
990#error unsupported target CPU
991#endif
992
993 /* restore global registers */
994#include "hostregs_helper.h"
995
996 /* fail safe: never use cpu_single_env outside cpu_exec() */
997 cpu_single_env = NULL;
998 return ret;
999}
1000#endif /* !VBOX */
1001
1002/* must only be called from the generated code as an exception can be
1003 generated */
1004void tb_invalidate_page_range(target_ulong start, target_ulong end)
1005{
1006 /* XXX: cannot enable it yet because it leads to an MMU exception
1007 where NIP != read address on PowerPC */
1008#if 0
1009 target_ulong phys_addr;
1010 phys_addr = get_phys_addr_code(env, start);
1011 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
1012#endif
1013}
1014
1015#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
1016
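/* User-mode helper: with CR0.PE clear or EFLAGS.VM set (real or VM86 mode) a
   segment base is simply selector << 4 with a 64K limit, while in protected mode
   the descriptor is fetched through load_seg(). */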
1017void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
1018{
1019 CPUX86State *saved_env;
1020
1021 saved_env = env;
1022 env = s;
1023 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
1024 selector &= 0xffff;
1025 cpu_x86_load_seg_cache(env, seg_reg, selector,
1026 (selector << 4), 0xffff, 0);
1027 } else {
1028 load_seg(seg_reg, selector);
1029 }
1030 env = saved_env;
1031}
1032
1033void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32)
1034{
1035 CPUX86State *saved_env;
1036
1037 saved_env = env;
1038 env = s;
1039
1040 helper_fsave((target_ulong)ptr, data32);
1041
1042 env = saved_env;
1043}
1044
1045void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32)
1046{
1047 CPUX86State *saved_env;
1048
1049 saved_env = env;
1050 env = s;
1051
1052 helper_frstor((target_ulong)ptr, data32);
1053
1054 env = saved_env;
1055}
1056
1057#endif /* TARGET_I386 */
1058
1059#if !defined(CONFIG_SOFTMMU)
1060
1061#if defined(TARGET_I386)
1062
1063/* 'pc' is the host PC at which the exception was raised. 'address' is
1064 the effective address of the memory exception. 'is_write' is 1 if a
1065 write caused the exception and 0 otherwise. 'old_set' is the
1066 signal set which should be restored */
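/* Return convention (as used by cpu_signal_handler below): 1 means the fault was
   handled here, either by unprotecting the page or by servicing the MMU fault;
   0 means it was not an MMU fault and must be left to the host's normal signal
   handling. */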
1067static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1068 int is_write, sigset_t *old_set,
1069 void *puc)
1070{
1071 TranslationBlock *tb;
1072 int ret;
1073
1074 if (cpu_single_env)
1075 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1076#if defined(DEBUG_SIGNAL)
1077 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1078 pc, address, is_write, *(unsigned long *)old_set);
1079#endif
1080 /* XXX: locking issue */
1081 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1082 return 1;
1083 }
1084
1085 /* see if it is an MMU fault */
1086 ret = cpu_x86_handle_mmu_fault(env, address, is_write,
1087 ((env->hflags & HF_CPL_MASK) == 3), 0);
1088 if (ret < 0)
1089 return 0; /* not an MMU fault */
1090 if (ret == 0)
1091 return 1; /* the MMU fault was handled without causing real CPU fault */
1092 /* now we have a real cpu fault */
1093 tb = tb_find_pc(pc);
1094 if (tb) {
1095 /* the PC is inside the translated code. It means that we have
1096 a virtual CPU fault */
1097 cpu_restore_state(tb, env, pc, puc);
1098 }
1099 if (ret == 1) {
1100#if 0
1101 printf("PF exception: EIP=0x%VGv CR2=0x%VGv error=0x%x\n",
1102 env->eip, env->cr[2], env->error_code);
1103#endif
1104 /* we restore the process signal mask as the sigreturn should
1105 do it (XXX: use sigsetjmp) */
1106 sigprocmask(SIG_SETMASK, old_set, NULL);
1107 raise_exception_err(env->exception_index, env->error_code);
1108 } else {
1109 /* activate soft MMU for this block */
1110 env->hflags |= HF_SOFTMMU_MASK;
1111 cpu_resume_from_signal(env, puc);
1112 }
1113 /* never comes here */
1114 return 1;
1115}
1116
1117#elif defined(TARGET_ARM)
1118static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1119 int is_write, sigset_t *old_set,
1120 void *puc)
1121{
1122 TranslationBlock *tb;
1123 int ret;
1124
1125 if (cpu_single_env)
1126 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1127#if defined(DEBUG_SIGNAL)
1128 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1129 pc, address, is_write, *(unsigned long *)old_set);
1130#endif
1131 /* XXX: locking issue */
1132 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1133 return 1;
1134 }
1135 /* see if it is an MMU fault */
1136 ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
1137 if (ret < 0)
1138 return 0; /* not an MMU fault */
1139 if (ret == 0)
1140 return 1; /* the MMU fault was handled without causing real CPU fault */
1141 /* now we have a real cpu fault */
1142 tb = tb_find_pc(pc);
1143 if (tb) {
1144 /* the PC is inside the translated code. It means that we have
1145 a virtual CPU fault */
1146 cpu_restore_state(tb, env, pc, puc);
1147 }
1148 /* we restore the process signal mask as the sigreturn should
1149 do it (XXX: use sigsetjmp) */
1150 sigprocmask(SIG_SETMASK, old_set, NULL);
1151 cpu_loop_exit();
1152}
1153#elif defined(TARGET_SPARC)
1154static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1155 int is_write, sigset_t *old_set,
1156 void *puc)
1157{
1158 TranslationBlock *tb;
1159 int ret;
1160
1161 if (cpu_single_env)
1162 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1163#if defined(DEBUG_SIGNAL)
1164 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1165 pc, address, is_write, *(unsigned long *)old_set);
1166#endif
1167 /* XXX: locking issue */
1168 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1169 return 1;
1170 }
1171 /* see if it is an MMU fault */
1172 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
1173 if (ret < 0)
1174 return 0; /* not an MMU fault */
1175 if (ret == 0)
1176 return 1; /* the MMU fault was handled without causing real CPU fault */
1177 /* now we have a real cpu fault */
1178 tb = tb_find_pc(pc);
1179 if (tb) {
1180 /* the PC is inside the translated code. It means that we have
1181 a virtual CPU fault */
1182 cpu_restore_state(tb, env, pc, puc);
1183 }
1184 /* we restore the process signal mask as the sigreturn should
1185 do it (XXX: use sigsetjmp) */
1186 sigprocmask(SIG_SETMASK, old_set, NULL);
1187 cpu_loop_exit();
1188}
1189#elif defined (TARGET_PPC)
1190static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1191 int is_write, sigset_t *old_set,
1192 void *puc)
1193{
1194 TranslationBlock *tb;
1195 int ret;
1196
1197 if (cpu_single_env)
1198 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1199#if defined(DEBUG_SIGNAL)
1200 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1201 pc, address, is_write, *(unsigned long *)old_set);
1202#endif
1203 /* XXX: locking issue */
1204 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1205 return 1;
1206 }
1207
1208 /* see if it is an MMU fault */
1209 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
1210 if (ret < 0)
1211 return 0; /* not an MMU fault */
1212 if (ret == 0)
1213 return 1; /* the MMU fault was handled without causing real CPU fault */
1214
1215 /* now we have a real cpu fault */
1216 tb = tb_find_pc(pc);
1217 if (tb) {
1218 /* the PC is inside the translated code. It means that we have
1219 a virtual CPU fault */
1220 cpu_restore_state(tb, env, pc, puc);
1221 }
1222 if (ret == 1) {
1223#if 0
1224 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1225 env->nip, env->error_code, tb);
1226#endif
1227 /* we restore the process signal mask as the sigreturn should
1228 do it (XXX: use sigsetjmp) */
1229 sigprocmask(SIG_SETMASK, old_set, NULL);
1230 do_raise_exception_err(env->exception_index, env->error_code);
1231 } else {
1232 /* activate soft MMU for this block */
1233 cpu_resume_from_signal(env, puc);
1234 }
1235 /* never comes here */
1236 return 1;
1237}
1238
1239#elif defined(TARGET_M68K)
1240static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1241 int is_write, sigset_t *old_set,
1242 void *puc)
1243{
1244 TranslationBlock *tb;
1245 int ret;
1246
1247 if (cpu_single_env)
1248 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1249#if defined(DEBUG_SIGNAL)
1250 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1251 pc, address, is_write, *(unsigned long *)old_set);
1252#endif
1253 /* XXX: locking issue */
1254 if (is_write && page_unprotect(address, pc, puc)) {
1255 return 1;
1256 }
1257 /* see if it is an MMU fault */
1258 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
1259 if (ret < 0)
1260 return 0; /* not an MMU fault */
1261 if (ret == 0)
1262 return 1; /* the MMU fault was handled without causing real CPU fault */
1263 /* now we have a real cpu fault */
1264 tb = tb_find_pc(pc);
1265 if (tb) {
1266 /* the PC is inside the translated code. It means that we have
1267 a virtual CPU fault */
1268 cpu_restore_state(tb, env, pc, puc);
1269 }
1270 /* we restore the process signal mask as the sigreturn should
1271 do it (XXX: use sigsetjmp) */
1272 sigprocmask(SIG_SETMASK, old_set, NULL);
1273 cpu_loop_exit();
1274 /* never comes here */
1275 return 1;
1276}
1277
1278#elif defined (TARGET_MIPS)
1279static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1280 int is_write, sigset_t *old_set,
1281 void *puc)
1282{
1283 TranslationBlock *tb;
1284 int ret;
1285
1286 if (cpu_single_env)
1287 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1288#if defined(DEBUG_SIGNAL)
1289 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1290 pc, address, is_write, *(unsigned long *)old_set);
1291#endif
1292 /* XXX: locking issue */
1293 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1294 return 1;
1295 }
1296
1297 /* see if it is an MMU fault */
1298 ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
1299 if (ret < 0)
1300 return 0; /* not an MMU fault */
1301 if (ret == 0)
1302 return 1; /* the MMU fault was handled without causing real CPU fault */
1303
1304 /* now we have a real cpu fault */
1305 tb = tb_find_pc(pc);
1306 if (tb) {
1307 /* the PC is inside the translated code. It means that we have
1308 a virtual CPU fault */
1309 cpu_restore_state(tb, env, pc, puc);
1310 }
1311 if (ret == 1) {
1312#if 0
1313 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1314 env->nip, env->error_code, tb);
1315#endif
1316 /* we restore the process signal mask as the sigreturn should
1317 do it (XXX: use sigsetjmp) */
1318 sigprocmask(SIG_SETMASK, old_set, NULL);
1319 do_raise_exception_err(env->exception_index, env->error_code);
1320 } else {
1321 /* activate soft MMU for this block */
1322 cpu_resume_from_signal(env, puc);
1323 }
1324 /* never comes here */
1325 return 1;
1326}
1327
1328#elif defined (TARGET_SH4)
1329static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1330 int is_write, sigset_t *old_set,
1331 void *puc)
1332{
1333 TranslationBlock *tb;
1334 int ret;
1335
1336 if (cpu_single_env)
1337 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1338#if defined(DEBUG_SIGNAL)
1339 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1340 pc, address, is_write, *(unsigned long *)old_set);
1341#endif
1342 /* XXX: locking issue */
1343 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1344 return 1;
1345 }
1346
1347 /* see if it is an MMU fault */
1348 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
1349 if (ret < 0)
1350 return 0; /* not an MMU fault */
1351 if (ret == 0)
1352 return 1; /* the MMU fault was handled without causing real CPU fault */
1353
1354 /* now we have a real cpu fault */
1355 tb = tb_find_pc(pc);
1356 if (tb) {
1357 /* the PC is inside the translated code. It means that we have
1358 a virtual CPU fault */
1359 cpu_restore_state(tb, env, pc, puc);
1360 }
1361#if 0
1362 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1363 env->nip, env->error_code, tb);
1364#endif
1365 /* we restore the process signal mask as the sigreturn should
1366 do it (XXX: use sigsetjmp) */
1367 sigprocmask(SIG_SETMASK, old_set, NULL);
1368 cpu_loop_exit();
1369 /* never comes here */
1370 return 1;
1371}
1372#else
1373#error unsupported target CPU
1374#endif
1375
1376#if defined(__i386__)
1377
1378#if defined(__APPLE__)
1379# include <sys/ucontext.h>
1380
1381# define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1382# define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1383# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1384#else
1385# define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1386# define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1387# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1388#endif
1389
1390int cpu_signal_handler(int host_signum, void *pinfo,
1391 void *puc)
1392{
1393 siginfo_t *info = pinfo;
1394 struct ucontext *uc = puc;
1395 unsigned long pc;
1396 int trapno;
1397
1398#ifndef REG_EIP
1399/* for glibc 2.1 */
1400#define REG_EIP EIP
1401#define REG_ERR ERR
1402#define REG_TRAPNO TRAPNO
1403#endif
1404 pc = uc->uc_mcontext.gregs[REG_EIP];
1405 trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
1406#if defined(TARGET_I386) && defined(USE_CODE_COPY)
1407 if (trapno == 0x00 || trapno == 0x05) {
1408 /* send division by zero or bound exception */
1409 cpu_send_trap(pc, trapno, uc);
1410 return 1;
1411 } else
1412#endif
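    /* Vector 0x0e is the x86 page-fault exception; bit 1 of its error code is set
       when the faulting access was a write, which is what the expression below
       extracts. */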
1413 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1414 trapno == 0xe ?
1415 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1416 &uc->uc_sigmask, puc);
1417}
1418
1419#elif defined(__x86_64__)
1420
1421int cpu_signal_handler(int host_signum, void *pinfo,
1422 void *puc)
1423{
1424 siginfo_t *info = pinfo;
1425 struct ucontext *uc = puc;
1426 unsigned long pc;
1427
1428 pc = uc->uc_mcontext.gregs[REG_RIP];
1429 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1430 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1431 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1432 &uc->uc_sigmask, puc);
1433}
1434
1435#elif defined(__powerpc__)
1436
1437/***********************************************************************
1438 * signal context platform-specific definitions
1439 * From Wine
1440 */
1441#ifdef linux
1442/* All Registers access - only for local access */
1443# define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1444/* Gpr Registers access */
1445# define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1446# define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1447# define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1448# define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1449# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1450# define LR_sig(context) REG_sig(link, context) /* Link register */
1451# define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1452/* Float Registers access */
1453# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1454# define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1455/* Exception Registers access */
1456# define DAR_sig(context) REG_sig(dar, context)
1457# define DSISR_sig(context) REG_sig(dsisr, context)
1458# define TRAP_sig(context) REG_sig(trap, context)
1459#endif /* linux */
1460
1461#ifdef __APPLE__
1462# include <sys/ucontext.h>
1463typedef struct ucontext SIGCONTEXT;
1464/* All Registers access - only for local access */
1465# define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1466# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1467# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1468# define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1469/* Gpr Registers access */
1470# define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1471# define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1472# define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1473# define CTR_sig(context) REG_sig(ctr, context)
1474# define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1475# define LR_sig(context) REG_sig(lr, context) /* Link register */
1476# define CR_sig(context) REG_sig(cr, context) /* Condition register */
1477/* Float Registers access */
1478# define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1479# define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1480/* Exception Registers access */
1481# define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1482# define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1483# define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1484#endif /* __APPLE__ */
1485
1486int cpu_signal_handler(int host_signum, void *pinfo,
1487 void *puc)
1488{
1489 siginfo_t *info = pinfo;
1490 struct ucontext *uc = puc;
1491 unsigned long pc;
1492 int is_write;
1493
1494 pc = IAR_sig(uc);
1495 is_write = 0;
1496#if 0
1497 /* ppc 4xx case */
1498 if (DSISR_sig(uc) & 0x00800000)
1499 is_write = 1;
1500#else
1501 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1502 is_write = 1;
1503#endif
1504 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1505 is_write, &uc->uc_sigmask, puc);
1506}
1507
1508#elif defined(__alpha__)
1509
1510int cpu_signal_handler(int host_signum, void *pinfo,
1511 void *puc)
1512{
1513 siginfo_t *info = pinfo;
1514 struct ucontext *uc = puc;
1515 uint32_t *pc = uc->uc_mcontext.sc_pc;
1516 uint32_t insn = *pc;
1517 int is_write = 0;
1518
1519 /* XXX: need kernel patch to get write flag faster */
1520 switch (insn >> 26) {
1521 case 0x0d: // stw
1522 case 0x0e: // stb
1523 case 0x0f: // stq_u
1524 case 0x24: // stf
1525 case 0x25: // stg
1526 case 0x26: // sts
1527 case 0x27: // stt
1528 case 0x2c: // stl
1529 case 0x2d: // stq
1530 case 0x2e: // stl_c
1531 case 0x2f: // stq_c
1532 is_write = 1;
1533 }
1534
1535 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1536 is_write, &uc->uc_sigmask, puc);
1537}
1538#elif defined(__sparc__)
1539
1540int cpu_signal_handler(int host_signum, void *pinfo,
1541 void *puc)
1542{
1543 siginfo_t *info = pinfo;
1544 uint32_t *regs = (uint32_t *)(info + 1);
1545 void *sigmask = (regs + 20);
1546 unsigned long pc;
1547 int is_write;
1548 uint32_t insn;
1549
1550 /* XXX: is there a standard glibc define ? */
1551 pc = regs[1];
1552 /* XXX: need kernel patch to get write flag faster */
1553 is_write = 0;
1554 insn = *(uint32_t *)pc;
1555 if ((insn >> 30) == 3) {
1556 switch((insn >> 19) & 0x3f) {
1557 case 0x05: // stb
1558 case 0x06: // sth
1559 case 0x04: // st
1560 case 0x07: // std
1561 case 0x24: // stf
1562 case 0x27: // stdf
1563 case 0x25: // stfsr
1564 is_write = 1;
1565 break;
1566 }
1567 }
1568 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1569 is_write, sigmask, NULL);
1570}
1571
1572#elif defined(__arm__)
1573
1574int cpu_signal_handler(int host_signum, void *pinfo,
1575 void *puc)
1576{
1577 siginfo_t *info = pinfo;
1578 struct ucontext *uc = puc;
1579 unsigned long pc;
1580 int is_write;
1581
1582 pc = uc->uc_mcontext.gregs[R15];
1583 /* XXX: compute is_write */
1584 is_write = 0;
1585 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1586 is_write,
1587 &uc->uc_sigmask, puc);
1588}
1589
1590#elif defined(__mc68000)
1591
1592int cpu_signal_handler(int host_signum, void *pinfo,
1593 void *puc)
1594{
1595 siginfo_t *info = pinfo;
1596 struct ucontext *uc = puc;
1597 unsigned long pc;
1598 int is_write;
1599
1600 pc = uc->uc_mcontext.gregs[16];
1601 /* XXX: compute is_write */
1602 is_write = 0;
1603 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1604 is_write,
1605 &uc->uc_sigmask, puc);
1606}
1607
1608#elif defined(__ia64)
1609
1610#ifndef __ISR_VALID
1611 /* This ought to be in <bits/siginfo.h>... */
1612# define __ISR_VALID 1
1613#endif
1614
1615int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1616{
1617 siginfo_t *info = pinfo;
1618 struct ucontext *uc = puc;
1619 unsigned long ip;
1620 int is_write = 0;
1621
1622 ip = uc->uc_mcontext.sc_ip;
1623 switch (host_signum) {
1624 case SIGILL:
1625 case SIGFPE:
1626 case SIGSEGV:
1627 case SIGBUS:
1628 case SIGTRAP:
1629 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1630 /* ISR.W (write-access) is bit 33: */
1631 is_write = (info->si_isr >> 33) & 1;
1632 break;
1633
1634 default:
1635 break;
1636 }
1637 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1638 is_write,
1639 &uc->uc_sigmask, puc);
1640}
1641
1642#elif defined(__s390__)
1643
1644int cpu_signal_handler(int host_signum, void *pinfo,
1645 void *puc)
1646{
1647 siginfo_t *info = pinfo;
1648 struct ucontext *uc = puc;
1649 unsigned long pc;
1650 int is_write;
1651
1652 pc = uc->uc_mcontext.psw.addr;
1653 /* XXX: compute is_write */
1654 is_write = 0;
1655 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1656 is_write,
1657 &uc->uc_sigmask, puc);
1658}
1659
1660#else
1661
1662#error host CPU specific signal handler needed
1663
1664#endif
1665
1666#endif /* !defined(CONFIG_SOFTMMU) */