VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c @ 54648

Last change on this file since 54648 was 47550, checked in by vboxsync, 11 years ago:

REM: Hack for dispatching interrupts to vector 8 via TRPM in IEM_VERIFICATION_MODE. (This should be fixed properly one day.)

  • Property svn:eol-style set to native
File size: 53.0 KB

/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    uintptr_t next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

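/* TB lookup is two-level: tb_find_fast() probes the per-CPU virtual-PC
   hash (tb_jmp_cache); on a miss, tb_find_slow() below walks the
   physical-address hash chain (tb_phys_hash), so different virtual
   mappings of the same physical code can share one translation. */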
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

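/* When the debug exception was not caused by a watchpoint hit (e.g. a
   single-step), clear any stale BP_WATCHPOINT_HIT flags before invoking
   the registered handler. */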
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

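/* Set asynchronously (outside this loop) to request that the CPU drop
   out of its execution loop; cpu_exec() samples it once on entry and
   mirrors it into env->exit_request. */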
volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret VBOX_ONLY(= 0), interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

# ifndef VBOX
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;
# endif /* !VBOX */

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif /* !VBOX */

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
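            /* glibc's setjmp can mangle the global register carrying env
               on sparc hosts (see the workaround near the top of the
               file), so re-load it from cpu_single_env after the call. */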
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
#ifdef VBOX
            env->current_tb = NULL; /* probably not needed, but whatever... */

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
#ifdef VBOX /* because of the above stuff */
                    env->exception_index = -1;
#endif
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
# ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt: vec=%#x int=%d pc=%04x:%RGv\n", env->exception_index, env->exception_is_int,
                         env->segs[R_CS].selector, (RTGCPTR)env->exception_next_eip));
# endif /* VBOX */
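                    /* IEM verification hack (see the commit message): an
                       exception_is_int value of 0x42 apparently marks an
                       interrupt to be dispatched via TRPM, signalled to
                       do_interrupt() through its extra last argument. */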
# ifdef IEM_VERIFICATION_MODE /* Ugly hack */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int && env->exception_is_int != 0x42,
                                 env->error_code,
                                 env->exception_next_eip,
                                 env->exception_is_int == 0x42);
# else
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
# endif
                    /* successfully delivered */
                    env->old_exception = -1;
# ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
# endif /* VBOX */
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

# ifndef VBOX
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
# endif /* !VBOX */

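            /* next_tb holds the address of the previously executed TB with
               exit-status flags in its two low bits (see the (next_tb & 3)
               tests below); zero means "don't chain from a previous TB". */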
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
# ifdef VBOX
                    /* Memory registration may post a tlb flush request; process it ASAP. */
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)) {
                        tlb_flush(env, true); /* (clears the flush flag) */
                    }

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests.  */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HM)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
# ifdef IEM_VERIFICATION_MODE
                        env->exception_index = ret = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
# endif
                    }
# endif /* VBOX */

# ifndef VBOX /** @todo reconcile our code with the following... */
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
# else /* VBOX */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
# endif /* VBOX */
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#ifndef VBOX
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#else /* VBOX */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#endif /* VBOX */
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT)) {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~(CPU_INTERRUPT_EXTERNAL_EXIT));
                        env->exit_request = 1;
                    }
#endif
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

#ifdef VBOX
                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */

#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
#endif /*VBOX*/
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                             (void *)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
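                /* (next_tb & ~3) is the previously executed TB and
                   (next_tb & 3) selects which of its two jump slots gets
                   patched to branch straight to the new TB. */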
#ifndef VBOX
                if (next_tb != 0 && tb->page_addr[1] == -1) {
#else /* VBOX */
                if (next_tb != 0 && !(tb->cflags & CF_RAW_MODE) && tb->page_addr[1] == -1) {
#endif /* VBOX */
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
#ifdef VBOX
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
#endif

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    Log5(("REM: tb=%p tc_ptr=%p %04x:%08RGv\n", tb, tc_ptr, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    if (next_tb)
                        Log5(("REM: next_tb=%p %04x:%08RGv\n", next_tb, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(uintptr_t)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
                 gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (   !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                           | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            && (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

# ifndef VBOX /* we might be using it elsewhere, and we only have one. */
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
# endif
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

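/* On i386, a host fault that maps to a guest fault is re-raised as a
   guest exception; other targets simply unwind to the main loop. */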
#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)   ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)    (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)  ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)  ((context)->uc_sigmask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uintptr_t pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)                ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)                ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)                ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)                ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                 ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                 ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)                ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)              ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)               ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    uintptr_t pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    uintptr_t pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    uintptr_t pc = uc->sc_pc;
    void *sigmask = (void *)(uintptr_t)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (uintptr_t)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */