VirtualBox

source: vbox/trunk/src/recompiler/cpu-exec.c@ 60893

Last change on this file since 60893 was 60893, checked in by vboxsync, 9 years ago

cpu-exec.c: Another IEM_VERIFICATION_MODE hack.

  • Property svn:eol-style set to native
File size: 53.0 KB
 
/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Oracle LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
 * other than GPL or LGPL is available it will apply instead, Oracle elects to use only
 * the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
 * a choice of LGPL license versions is made available with the language indicating
 * that LGPLv2 or any later version may be used, or where a choice of which version
 * of the LGPL is applied is otherwise unspecified.
 */

#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

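/* Note: cpu_exec() below arms a setjmp() context in env->jmp_env before
   entering its execution loops.  cpu_loop_exit() is the standard way to
   bail out from anywhere inside generated code or helper functions: it
   longjmp()s back to that context, with env->exception_index saying why. */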
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    uintptr_t next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
    tcg_qemu_tb_exec(tb->tc_ptr, next_tb);
#else
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
#endif
    env->current_tb = NULL;

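    /* The low two bits of next_tb encode why execution stopped; the upper
       bits carry the last TB's address.  A value of 2 in the low bits means
       the block was exited before it really started executing (e.g. by an
       asynchronous event), so the guest PC must be resynchronized from the
       TB below rather than trusted as-is. */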
    if ((next_tb & 3) == 2) {
        /* Restore PC. This may happen if an async event occurs before
           the TB starts executing. */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

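/* Slow-path TB lookup: hash the physical PC into tb_phys_hash[] and walk
   the collision chain comparing pc, cs_base, flags and, for blocks that
   span a page boundary, the second physical page.  On a miss the block is
   translated fresh via tb_gen_code(). */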
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

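/* Fast-path TB lookup: tb_jmp_cache is a direct-mapped cache indexed by a
   hash of the virtual PC, so a hit costs a single (pc, cs_base, flags)
   compare; any mismatch falls back to tb_find_slow() above. */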
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

volatile sig_atomic_t exit_request;

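/* Shape of cpu_exec(), roughly:
 *
 *     setjmp(env->jmp_env);
 *     for (;;) {                  // re-entered after each cpu_loop_exit()
 *         deliver any pending exception, or break out with a return code;
 *         for (;;) {              // translation/execution loop
 *             service interrupt_request flags;
 *             tb = tb_find_fast();
 *             optionally chain it to the previous TB;
 *             next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
 *         }
 *     }
 */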
int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret VBOX_ONLY(= 0), interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

# ifndef VBOX
    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;
# endif /* !VBOX */

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it. */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
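    /* Lazy flags, in sketch form: rather than materializing EFLAGS after
       every instruction, the i386 target keeps the last ALU operation's
       inputs in CC_SRC/CC_DST and an operation code in CC_OP, computing
       the arithmetic flags only on demand.  DF is pre-scaled to +1/-1
       (from EFLAGS bit 10) so string instructions can add it directly. */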
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
#ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */
    env->exception_index = -1;
#endif /* !VBOX */

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
#ifdef VBOX
            env->current_tb = NULL; /* probably not needed, but whatever... */

            /*
             * Check for fatal errors first
             */
            if (env->interrupt_request & CPU_INTERRUPT_RC) {
                env->exception_index = EXCP_RC;
                ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                ret = env->exception_index;
                cpu_loop_exit();
            }
#endif

            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
#ifdef VBOX /* because of the above stuff */
                    env->exception_index = -1;
#endif
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
# ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    Log(("do_interrupt: vec=%#x int=%d pc=%04x:%RGv\n", env->exception_index, env->exception_is_int,
                         env->segs[R_CS].selector, (RTGCPTR)env->exception_next_eip));
# endif /* VBOX */
# ifdef IEM_VERIFICATION_MODE /* Ugly hacks */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int && env->exception_is_int != 0x42,
                                 env->error_code,
                                 env->exception_next_eip,
                                 env->exception_is_int == 0x42);
                    cpu_loop_exit();
# else
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
# endif
                    /* successfully delivered */
                    env->old_exception = -1;
# ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
# endif /* VBOX */
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

# ifndef VBOX
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
# endif /* !VBOX */

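            /* next_tb carries the address of the previously executed TB with
               the index of the jump slot to patch in its low two bits; zero
               means "do not chain the next block".  See the tb_add_jump()
               call below. */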
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
# ifdef VBOX
                    /* Memory registration may post a tlb flush request, process it ASAP. */
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_FLUSH_TLB)) {
                        tlb_flush(env, true); /* (clears the flush flag) */
                    }

                    /* Single instruction exec request, we execute it and return (one way or the other).
                       The caller will always reschedule after doing this operation! */
                    if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR)
                    {
                        /* not in flight are we? (if we are, we trapped) */
                        if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
                        {
                            ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT);
                            env->exception_index = EXCP_SINGLE_INSTR;
                            if (emulate_single_instr(env) == -1)
                                AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip));

                            /* When we receive an external interrupt during execution of this single
                               instruction, then we should stay here. We will leave when we're ready
                               for raw-mode or when interrupted by pending EMT requests. */
                            interrupt_request = env->interrupt_request; /* reload this! */
                            if (   !(interrupt_request & CPU_INTERRUPT_HARD)
                                || !(env->eflags & IF_MASK)
                                ||  (env->hflags & HF_INHIBIT_IRQ_MASK)
                                ||  (env->state & CPU_RAW_HM)
                               )
                            {
                                env->exception_index = ret = EXCP_SINGLE_INSTR;
                                cpu_loop_exit();
                            }
                        }
                        /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
# ifdef IEM_VERIFICATION_MODE
                        env->exception_index = ret = EXCP_SINGLE_INSTR;
                        cpu_loop_exit();
# endif
                    }
# endif /* VBOX */

# ifndef VBOX /** @todo reconcile our code with the following... */
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
# else /* VBOX */
                    RAWEx_ProfileStart(env, STATS_IRQ_HANDLING);
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    }
                    else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                             (env->eflags & IF_MASK) &&
                             !(env->hflags & HF_INHIBIT_IRQ_MASK))
                    {
                        /* if hardware interrupt pending, we execute it */
                        int intno;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD);
                        intno = cpu_get_pic_interrupt(env);
                        if (intno >= 0)
                        {
                            Log(("do_interrupt %d\n", intno));
                            do_interrupt(intno, 0, 0, 0, 1);
                        }
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
# endif /* VBOX */
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC. On real hardware the load causes the
                       return to occur. The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address. */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point. Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled. */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
#ifndef VBOX
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
#else /* VBOX */
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB);
#endif /* VBOX */
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_IRQ_HANDLING);
                    if (interrupt_request & CPU_INTERRUPT_RC) {
                        env->exception_index = EXCP_RC;
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC);
                        ret = env->exception_index;
                        cpu_loop_exit();
                    }
                    if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT)) {
                        ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~(CPU_INTERRUPT_EXTERNAL_EXIT));
                        env->exit_request = 1;
                    }
#endif
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }

#ifdef VBOX
                /*
                 * Check if the CPU state allows us to execute the code in raw-mode.
                 */
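                /* remR3CanExecuteRaw() is the VirtualBox policy hook that
                   decides whether the guest state is safe to run outside
                   the recompiler; when it returns true it has filled in
                   env->exception_index, and the loop exits with that status
                   so the caller can switch execution modes. */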
                RAWEx_ProfileStart(env, STATS_RAW_CHECK);
                if (remR3CanExecuteRaw(env,
                                       env->eip + env->segs[R_CS].base,
                                       env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)),
                                       &env->exception_index))
                {
                    RAWEx_ProfileStop(env, STATS_RAW_CHECK);
                    ret = env->exception_index;
                    cpu_loop_exit();
                }
                RAWEx_ProfileStop(env, STATS_RAW_CHECK);
#endif /* VBOX */

#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
#ifdef VBOX
                RAWEx_ProfileStart(env, STATS_TLB_LOOKUP);
#endif /*VBOX*/
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
                              (void *)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
#ifndef VBOX
                if (next_tb != 0 && tb->page_addr[1] == -1) {
#else /* VBOX */
                if (next_tb != 0 && !(tb->cflags & CF_RAW_MODE) && tb->page_addr[1] == -1) {
#endif /* VBOX */
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
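                /* tb_add_jump() patches the exit jump of the previous TB so
                   it branches directly to tb->tc_ptr, and records the link
                   so tb_phys_invalidate() can unchain it later.  Chained
                   TBs then run back-to-back without returning to this
                   loop. */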
                spin_unlock(&tb_lock);
#ifdef VBOX
                RAWEx_ProfileStop(env, STATS_TLB_LOOKUP);
#endif

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#ifdef VBOX
                    RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    Log5(("REM: tb=%p tc_ptr=%p %04x:%08RGv\n", tb, tc_ptr, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM)
                    tcg_qemu_tb_exec(tc_ptr, next_tb);
#else
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
#endif
                    if (next_tb)
                        Log5(("REM: next_tb=%p %04x:%08RGv\n", next_tb, env->segs[R_CS].selector, (RTGCPTR)env->eip));
#ifdef VBOX
                    RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE);
#endif
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired. */
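                        /* icount in outline: the 16-bit low half of
                           icount_decr counts down inside generated code and
                           traps out here at zero; icount_extra banks the
                           instructions that did not fit in 16 bits.  Refill
                           the decrementer in steps of at most 0xffff, or run
                           the remainder uncached and leave the loop. */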
                        int insns_left;
                        tb = (TranslationBlock *)(uintptr_t)(next_tb & ~3);
                        /* Restore PC. */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution. */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions. */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        /* NULL the current_tb here so cpu_interrupt() doesn't do anything
           unnecessary (like crashing during emulate single instruction).
           Note! Don't use env1->pVM here, the code wouldn't run with
           gcc-4.4/amd64 anymore, see #3883. */
        env->current_tb = NULL;
        if (    !(env->interrupt_request & (  CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC
                                            | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT))
            &&  (   (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER)
                 || TMTimerPollBool(env->pVM, env->pVCpu)) ) {
            ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER);
            remR3ProfileStart(STATS_QEMU_RUN_TIMERS);
            TMR3TimerQueuesDo(env->pVM);
            remR3ProfileStop(STATS_QEMU_RUN_TIMERS);
        }
#endif
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

# ifndef VBOX /* we might be using it elsewhere, we only have one. */
    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
# endif
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

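/* In real or VM86 mode a segment load simply sets base = selector * 16
   with a 64 KiB limit and no checks; in protected mode the descriptor
   tables must be consulted, which helper_load_seg() does (and which may
   raise a guest exception). */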
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

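/* Host-signal recovery, in outline: a fault raised by a guest memory access
   (e.g. SIGSEGV) lands in the per-host-arch cpu_signal_handler() further
   down, which extracts the faulting host PC and a write flag from the
   signal context and calls handle_cpu_signal(); that either re-enables a
   write-protected page (page_unprotect), services a guest MMU fault, or
   turns the fault into a guest exception via EXCEPTION_ACTION. */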
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uintptr_t pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                 REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                 REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(link, context)  /* Link register */
# define CR_sig(context)                  REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)               (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                 REG_sig(dar, context)
# define DSISR_sig(context)               REG_sig(dsisr, context)
# define TRAP_sig(context)                REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)                 ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)                 ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)                 ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)                 ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                  ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                  ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)                 ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)               ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)                ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)       ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)    ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)        REG_sig(r##reg_num, context)
# define IAR_sig(context)                 REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                 REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                 REG_sig(ctr, context)
# define XER_sig(context)                 REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                  REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                  REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)      FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)               ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                 EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)               EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    uintptr_t pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    uintptr_t pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    uintptr_t pc = uc->sc_pc;
    void *sigmask = (void *)(uintptr_t)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (uintptr_t)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments. The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all. Not that we could get to it from here even if it were. */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions. */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster. */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */