VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@69301

Last change on this file since 69301 was 62286, checked in by vboxsync, 8 years ago

REM: Fixed TRPM -> recompiler IRQ translation problem, well hacked it.

  • Property svn:eol-style set to native
File size: 52.9 KB
 
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
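
/* Both cpu_loop_exit() and cpu_resume_from_signal() unwind with
   longjmp(env->jmp_env, 1), landing at the setjmp() at the top of the
   main loop in cpu_exec() below, which then re-examines
   env->exception_index and env->interrupt_request. */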

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    uintptr_t next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
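
/* A note on next_tb: tcg_qemu_tb_exec() returns the address of the last
   executed TB with extra information packed into the two low bits
   (presumably free because TBs are suitably aligned).  Bits 0/1 name the
   jump slot taken, which cpu_exec() feeds to tb_add_jump() for chaining,
   while the value 2 marks an icount-forced exit taken before the block
   body ran, so only the PC has to be restored. */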

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
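
/* The lookup is two-level: tb_find_fast() probes the per-CPU
   tb_jmp_cache hashed on the virtual PC; on a miss, tb_find_slow()
   walks the tb_phys_hash chain keyed on the physical PC, comparing
   pc/cs_base/flags (and, for blocks straddling a page boundary, the
   second physical page) before translating a fresh block with
   tb_gen_code(). */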

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret VBOX_ONLY(= 0), interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

# ifndef VBOX
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;
# endif /* !VBOX */

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif /* !VBOX */
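    /* (x86) Condition flags are kept in a lazy format while translated
       code runs: CC_SRC/CC_OP describe how to recompute them and DF is
       held as +1/-1; helper_cc_compute_all() folds them back into
       env->eflags on the exit path below. */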

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
#ifdef VBOX
            env->current_tb = NULL; /* probably not needed, but whatever... */

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
#ifdef VBOX /* because of the above stuff */
                    env->exception_index = -1;
#endif
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
# ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt: vec=%#x int=%d pc=%04x:%RGv\n", env->exception_index, env->exception_is_int,
                         env->segs[R_CS].selector, (RTGCPTR)env->exception_next_eip));
# endif /* VBOX */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int && env->exception_is_int != EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ,
                                 env->error_code,
                                 env->exception_next_eip,
                                 env->exception_is_int == EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ);
# ifdef IEM_VERIFICATION_MODE /* Ugly hacks */
                    cpu_loop_exit();
# endif
                    /* successfully delivered */
                    env->old_exception = -1;
# ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
# endif /* VBOX */
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }
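
            /* Dispatch rule: exception_index values at or above
               EXCP_INTERRUPT are requests to leave cpu_exec() and are
               returned to the caller, while smaller values are guest
               exceptions delivered into guest state via do_interrupt(). */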

# ifndef VBOX
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
# endif /* !VBOX */

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
# ifdef VBOX
                    /* Memory registration may post a tlb flush request, process it ASAP. */
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)) {
                        tlb_flush(env, true); /* (clears the flush flag) */
                    }

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HM)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
# ifdef IEM_VERIFICATION_MODE
                        env->exception_index = ret = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
# endif
                    }
# endif /* VBOX */

# ifndef VBOX /** @todo reconcile our code with the following... */
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
# else /* VBOX */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
# endif /* VBOX */
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#ifndef VBOX
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#else /* VBOX */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#endif /* VBOX */
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT)) {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~(CPU_INTERRUPT_EXTERNAL_EXIT));
                        env->exit_request = 1;
                    }
#endif
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

#ifdef VBOX
                /*
                 * Check whether the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */

#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
#endif /*VBOX*/
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              (void *)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
#ifndef VBOX
                if (next_tb != 0 && tb->page_addr[1] == -1) {
#else /* VBOX */
                if (next_tb != 0 && !(tb->cflags & CF_RAW_MODE) && tb->page_addr[1] == -1) {
#endif /* VBOX */
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
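                /* tb_add_jump() patches jump slot (next_tb & 3) of the
                   TB that just exited (next_tb & ~3) so it branches
                   straight into tb's generated code; later iterations
                   then bypass the lookup above until the chain is broken
                   by next_tb = 0 or by TB invalidation. */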
                spin_unlock(&tb_lock);
#ifdef VBOX
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
#endif

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    Log5(("REM: tb=%p tc_ptr=%p %04x:%08RGv\n", tb, tc_ptr, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    if (next_tb)
                        Log5(("REM: next_tb=%p %04x:%08RGv\n", next_tb, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
                        int insns_left;
                        tb = (TranslationBlock *)(uintptr_t)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
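                    /* icount bookkeeping: icount_decr.u16.low is the
                       16-bit budget decremented by generated code, and
                       icount_extra holds whatever did not fit in it.  On
                       expiry the budget is refilled from icount_extra;
                       once both run out, the remainder executes via
                       cpu_exec_nocache() and the loop exits with
                       EXCP_INTERRUPT so pending deadlines get serviced. */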
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (    !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                            | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?. */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

# ifndef VBOX /* we might be using elsewhere, we only have one. */
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
# endif
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
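
/* Fault handling order above: a write fault is first treated as a hit
   on a page write-protected to catch self-modifying code
   (page_unprotect); failing that it goes to the guest MMU
   (cpu_handle_mmu_fault); only a genuine guest fault restores the guest
   CPU state from the host PC (cpu_restore_state) and raises the guest
   exception via EXCEPTION_ACTION. */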

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uintptr_t pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)              ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)              ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)              ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)              ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)               ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)               ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)              ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)            ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)             ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(r##reg_num, context)
# define IAR_sig(context)              REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)
# define XER_sig(context)              REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)               REG_sig(lr, context)    /* Link register */
# define CR_sig(context)               REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)            ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)              EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)            EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)             EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    uintptr_t pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    uintptr_t pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    uintptr_t pc = uc->sc_pc;
    void *sigmask = (void *)(uintptr_t)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (uintptr_t)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were. */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions. */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster. */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */