VBox changeset 37689 for trunk/src/recompiler

- Timestamp: 2011-06-29 16:01:23
- Location: trunk/src/recompiler
- Files: 3 added, 1 deleted, 49 modified

Legend:
- unchanged (no prefix)
- added (+)
- removed (-)
trunk/src/recompiler/README.vbox
(r36212 → r37689)

-QEMU is based on v0.11.1 (35bfc7324e2e6946c4113ada5db30553a1a7c40b)
+QEMU is based on v0.13.0 (6ed912999d6ef636a5be5ccb266d7d3c0f0310b4)
 from git://git.savannah.nongnu.org/qemu.git.
trunk/src/recompiler/Sun/config-host.h
(r37677 → r37689)

 # endif
 #endif
-#define QEMU_VERSION "0.12.5"
+#define QEMU_VERSION "0.13.0"
 #define CONFIG_UNAME_RELEASE ""
 #define CONFIG_QEMU_SHAREDIR "."
trunk/src/recompiler/VBoxRecompiler.c
(r37676 → r37689)

 #include "cpu.h"
 #include "exec-all.h"
+#include "ioport.h"

 #include <VBox/vmm/rem.h>
…
 extern void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
 extern void tlb_flush_page(CPUX86State *env, target_ulong addr);
-extern void tlb_flush(CPUState *env, int flush_global);
+extern void tlb_flush(CPUX86State *env, int flush_global);
 extern void sync_seg(CPUX86State *env1, int seg_reg, int selector);
 extern void sync_ldtr(CPUX86State *env1, int selector);
…
 }

 /**
- * Initializes phys_ram_dirty and phys_ram_dirty_size.
+ * Initializes ram_list.phys_dirty and ram_list.phys_dirty_size.
  *
  * @returns VBox status code.
…
     int rc = VINF_SUCCESS;
     RTGCPHYS cb;
+
+    AssertLogRelReturn(QLIST_EMPTY(&ram_list.blocks), VERR_INTERNAL_ERROR_2);

     cb = pVM->rem.s.GCPhysLastRam + 1;
…
                           ("GCPhysLastRam=%RGp - out of range\n", pVM->rem.s.GCPhysLastRam),
                           VERR_OUT_OF_RANGE);
-    phys_ram_dirty_size = cb >> PAGE_SHIFT;
-    AssertMsg(((RTGCPHYS)phys_ram_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));
+
+    ram_list.phys_dirty_size = cb >> PAGE_SHIFT;
+    AssertMsg(((RTGCPHYS)ram_list.phys_dirty_size << PAGE_SHIFT) == cb, ("%RGp\n", cb));

     if (!fGuarded)
     {
-        phys_ram_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, phys_ram_dirty_size);
-        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", phys_ram_dirty_size), VERR_NO_MEMORY);
+        ram_list.phys_dirty = MMR3HeapAlloc(pVM, MM_TAG_REM, ram_list.phys_dirty_size);
+        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", ram_list.phys_dirty_size), VERR_NO_MEMORY);
     }
     else
     {
         /*
          * Fill it up the nearest 4GB RAM and leave at least _64KB of guard after it.
          */
-        uint32_t cbBitmapAligned = RT_ALIGN_32(phys_ram_dirty_size, PAGE_SIZE);
-        uint32_t cbBitmapFull    = RT_ALIGN_32(phys_ram_dirty_size, (_4G >> PAGE_SHIFT));
+        uint32_t cbBitmapAligned = RT_ALIGN_32(ram_list.phys_dirty_size, PAGE_SIZE);
+        uint32_t cbBitmapFull    = RT_ALIGN_32(ram_list.phys_dirty_size, (_4G >> PAGE_SHIFT));
         if (cbBitmapFull == cbBitmapAligned)
             cbBitmapFull += _4G >> PAGE_SHIFT;
…
             cbBitmapFull += _64K;

-        phys_ram_dirty = RTMemPageAlloc(cbBitmapFull);
-        AssertLogRelMsgReturn(phys_ram_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
-
-        rc = RTMemProtect(phys_ram_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
+        ram_list.phys_dirty = RTMemPageAlloc(cbBitmapFull);
+        AssertLogRelMsgReturn(ram_list.phys_dirty, ("Failed to allocate %u bytes of dirty page map bytes\n", cbBitmapFull), VERR_NO_MEMORY);
+
+        rc = RTMemProtect(ram_list.phys_dirty + cbBitmapAligned, cbBitmapFull - cbBitmapAligned, RTMEM_PROT_NONE);
         if (RT_FAILURE(rc))
         {
-            RTMemPageFree(phys_ram_dirty, cbBitmapFull);
+            RTMemPageFree(ram_list.phys_dirty, cbBitmapFull);
             AssertLogRelRCReturn(rc, rc);
         }

-        phys_ram_dirty += cbBitmapAligned - phys_ram_dirty_size;
+        ram_list.phys_dirty += cbBitmapAligned - ram_list.phys_dirty_size;
     }

     /* initialize it. */
-    memset(phys_ram_dirty, 0xff, phys_ram_dirty_size);
+    memset(ram_list.phys_dirty, 0xff, ram_list.phys_dirty_size);
     return rc;
 }
…
-bool remR3CanExecuteRaw(CPUState *env, RTGCPTR eip, unsigned fFlags, int *piException)
+bool remR3CanExecuteRaw(CPUX86State *env, RTGCPTR eip, unsigned fFlags, int *piException)
…
-bool remR3GetOpcode(CPUState *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
+bool remR3GetOpcode(CPUX86State *env, RTGCPTR GCPtrInstr, uint8_t *pu8Byte)
…
-void remR3FlushPage(CPUState *env, RTGCPTR GCPtr)
+void remR3FlushPage(CPUX86State *env, RTGCPTR GCPtr)
…
 #ifndef REM_PHYS_ADDR_IN_TLB
 /** Wrapper for PGMR3PhysTlbGCPhys2Ptr. */
-void *remR3TlbGCPhys2Ptr(CPUState *env1, target_ulong physAddr, int fWritable)
+void *remR3TlbGCPhys2Ptr(CPUX86State *env1, target_ulong physAddr, int fWritable)
…
-void remR3ProtectCode(CPUState *env, RTGCPTR GCPtr)
+void remR3ProtectCode(CPUX86State *env, RTGCPTR GCPtr)
…
-void remR3UnprotectCode(CPUState *env, RTGCPTR GCPtr)
+void remR3UnprotectCode(CPUX86State *env, RTGCPTR GCPtr)
…
-void remR3FlushTLB(CPUState *env, bool fGlobal)
+void remR3FlushTLB(CPUX86State *env, bool fGlobal)
…
-void remR3ChangeCpuMode(CPUState *env)
+void remR3ChangeCpuMode(CPUX86State *env)
…
-void remR3DmaRun(CPUState *env)
+void remR3DmaRun(CPUX86State *env)
…
-void remR3TimersRun(CPUState *env)
+void remR3TimersRun(CPUX86State *env)
…
-int remR3NotifyTrap(CPUState *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
+int remR3NotifyTrap(CPUX86State *env, uint32_t uTrap, uint32_t uErrorCode, RTGCPTR pvNextEIP)
…
-void remR3RecordCall(CPUState *env)
+void remR3RecordCall(CPUX86State *env)
…
-target_ulong remR3PhysGetPhysicalAddressCode(CPUState *env,
+target_ulong remR3PhysGetPhysicalAddressCode(CPUX86State *env,
                                              target_ulong addr,
                                              CPUTLBEntry *pTLBEntry,
                                              target_phys_addr_t ioTLBEntry)
…
-bool remR3DisasInstr(CPUState *env, int f32BitCode, char *pszPrefix)
+bool remR3DisasInstr(CPUX86State *env, int f32BitCode, char *pszPrefix)
…
-int cpu_get_pic_interrupt(CPUState *env)
+int cpu_get_pic_interrupt(CPUX86State *env)
…
-void cpu_outb(CPUState *env, pio_addr_t addr, uint8_t val)
+void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val)
…
-void cpu_outw(CPUState *env, pio_addr_t addr, uint16_t val)
+void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val)
…
-void cpu_outl(CPUState *env, pio_addr_t addr, uint32_t val)
+void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val)
…
-uint8_t cpu_inb(CPUState *env, pio_addr_t addr)
+uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr)
…
-uint16_t cpu_inw(CPUState *env, pio_addr_t addr)
+uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr)
…
-uint32_t cpu_inl(CPUState *env, pio_addr_t addr)
+uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr)
…
-void cpu_abort(CPUState *env, const char *pszFormat, ...)
+void cpu_abort(CPUX86State *env, const char *pszFormat, ...)
…
-void cpu_smm_update(CPUState *env)
-{
-}
+void cpu_smm_update(CPUX86State *env)
+{
+}
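The guarded path of remR3InitPhysRamSizeAndDirtyMap() above page-aligns the dirty bitmap, appends an inaccessible tail, and shifts the returned pointer so the bitmap ends right at the guard. Below is a minimal standalone sketch of that pattern, using only the IPRT calls that already appear in the diff (RTMemPageAlloc, RTMemProtect, RTMemPageFree); the helper name and its error handling are invented for illustration and are not part of the changeset.

#include <iprt/mem.h>
#include <iprt/param.h>
#include <iprt/err.h>

/* Hypothetical helper: allocate cbBitmap usable bytes followed by an
 * inaccessible guard area so that overruns fault immediately. */
static uint8_t *demoAllocGuardedBitmap(size_t cbBitmap, size_t *pcbFull)
{
    size_t   cbAligned = RT_ALIGN_Z(cbBitmap, PAGE_SIZE);
    size_t   cbFull    = cbAligned + PAGE_SIZE;        /* at least one guard page */
    uint8_t *pb        = (uint8_t *)RTMemPageAlloc(cbFull);
    if (!pb)
        return NULL;

    /* Revoke all access to the tail, as the fGuarded path above does. */
    int rc = RTMemProtect(pb + cbAligned, cbFull - cbAligned, RTMEM_PROT_NONE);
    if (RT_FAILURE(rc))
    {
        RTMemPageFree(pb, cbFull);
        return NULL;
    }

    *pcbFull = cbFull;
    /* Shift the start so the usable bitmap ends exactly at the guard area. */
    return pb + cbAligned - cbBitmap;
}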
trunk/src/recompiler/bswap.h
(r37675 → r37689)

 #ifdef HOST_WORDS_BIGENDIAN
 #define cpu_to_32wu cpu_to_be32wu
+#define leul_to_cpu(v) glue(glue(le,HOST_LONG_BITS),_to_cpu)(v)
 #else
 #define cpu_to_32wu cpu_to_le32wu
+#define leul_to_cpu(v) (v)
 #endif
…
 #undef be_bswaps

+/* len must be one of 1, 2, 4 */
+static inline uint32_t qemu_bswap_len(uint32_t value, int len)
+{
+    return bswap32(value) >> (32 - 8 * len);
+}
+
 #endif /* BSWAP_H */
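For reference, the new qemu_bswap_len() simply byte-swaps a 32-bit value and keeps its low len bytes. A hypothetical standalone check of that behaviour, with bswap32() re-implemented locally so the snippet compiles outside the QEMU tree:

#include <stdint.h>
#include <stdio.h>

static uint32_t bswap32(uint32_t x)          /* local stand-in for QEMU's bswap32 */
{
    return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
           ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
}

static uint32_t qemu_bswap_len(uint32_t value, int len)   /* len: 1, 2 or 4 */
{
    return bswap32(value) >> (32 - 8 * len);
}

int main(void)
{
    printf("%08x\n", qemu_bswap_len(0x12345678, 4));   /* 78563412 */
    printf("%08x\n", qemu_bswap_len(0x12345678, 2));   /* 00007856 */
    printf("%08x\n", qemu_bswap_len(0x12345678, 1));   /* 00000078 */
    return 0;
}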
trunk/src/recompiler/cpu-all.h
(r37675 → r37689)

 }

-#else /* !VBOX */
+#else /* !VBOX || !REM_PHYS_ADDR_IN_TLB */

 static inline int ldub_p(const void *ptr)
…
 }
 #endif
-#endif /* !VBOX */
+
+#endif /* !VBOX || !REM_PHYS_ADDR_IN_TLB */

 #if !defined(HOST_WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
…
 extern unsigned long guest_base;
 extern int have_guest_base;
+extern unsigned long reserved_va;
 #define GUEST_BASE guest_base
+#define RESERVED_VA reserved_va
 #else
 #define GUEST_BASE 0ul
+#define RESERVED_VA 0ul
 #endif

 /* All direct uses of g2h and h2g need to go away for usermode softmmu. */
 #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
+
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define h2g_valid(x) 1
+#else
+#define h2g_valid(x) ({ \
+    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
+})
+#endif
+
 #define h2g(x) ({ \
     unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
     /* Check if given address fits target address space */ \
-    assert(__ret == (abi_ulong)__ret); \
+    assert(h2g_valid(x)); \
     (abi_ulong)__ret; \
 })
-#define h2g_valid(x) ({ \
-    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
-    (__guest == (abi_ulong)__guest); \
-})
…
    code */
 #define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away. */
 #define PAGE_RESERVED  0x0020
+#endif

+#if defined(CONFIG_USER_ONLY)
 void page_dump(FILE *f);
-int walk_memory_regions(void *,
-    int (*fn)(void *, unsigned long, unsigned long, unsigned long));
+
+typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
+                                      abi_ulong, unsigned long);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
 int page_check_range(target_ulong start, target_ulong len, int flags);
-
-void cpu_exec_init_all(unsigned long tb_size);
+#endif
+
 CPUState *cpu_copy(CPUState *env);
 CPUState *qemu_get_cpu(int cpu);
…
 #ifndef VBOX
     __attribute__ ((__format__ (__printf__, 2, 3)));
-#else
+#else  /* VBOX */
     ;
-#endif
+#endif /* VBOX */
 extern CPUState *first_cpu;
 extern CPUState *cpu_single_env;
-extern int64_t qemu_icount;
-extern int use_icount;

 #define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
…
 void cpu_single_step(CPUState *env, int enabled);
 void cpu_reset(CPUState *s);
-
-/* Return the physical page corresponding to a virtual one. Use it
-   only for debugging because no protection checks are done. Return -1
-   if no page found. */
-target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
+int cpu_is_stopped(CPUState *env);
+void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);

 #define CPU_LOG_TB_OUT_ASM (1 << 0)
…
 int cpu_str_to_log_mask(const char *str);

-/* IO ports API */
-#include "ioport.h"
+#if !defined(CONFIG_USER_ONLY)
+
+/* Return the physical page corresponding to a virtual one. Use it
+   only for debugging because no protection checks are done. Return -1
+   if no page found. */
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

 /* memory API */
…
 #ifndef VBOX
 extern int phys_ram_fd;
-extern uint8_t *phys_ram_dirty;
 extern ram_addr_t ram_size;
-extern ram_addr_t last_ram_offset;
-#else /* VBOX */
-/** This is required for bounds checking the phys_ram_dirty accesses. */
-extern RTGCPHYS phys_ram_dirty_size;
-extern uint8_t *phys_ram_dirty;
-#endif /* VBOX */
+#endif /* !VBOX */
+
+typedef struct RAMBlock {
+    uint8_t *host;
+    ram_addr_t offset;
+    ram_addr_t length;
+    char idstr[256];
+    QLIST_ENTRY(RAMBlock) next;
+#if defined(__linux__) && !defined(TARGET_S390X)
+    int fd;
+#endif
+} RAMBlock;
+
+typedef struct RAMList {
+    uint8_t *phys_dirty;
+#ifdef VBOX
+    /** This is required for bounds checking the phys_ram_dirty accesses.
+     * We have memory ranges (the high PC-BIOS mapping) which causes some pages
+     * to fall outside the dirty map. */
+    RTGCPHYS phys_dirty_size;
+# if 0
+#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr,rv) \
+    do { \
+        if (RT_UNLIKELY(((addr) >> TARGET_PAGE_BITS) >= ram_list.phys_dirty_size)) { \
+            Log(("%s: %RGp\n", __FUNCTION__, (RTGCPHYS)addr)); \
+            return (rv); \
+        } \
+    } while (0)
+#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr) \
+    do { \
+        if (RT_UNLIKELY(((addr) >> TARGET_PAGE_BITS) >= ram_list.phys_dirty_size)) { \
+            Log(("%s: %RGp\n", __FUNCTION__, (RTGCPHYS)addr)); \
+            return; \
+        } \
+    } while (0)
+# else
+#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr,rv) \
+    AssertMsgReturn(((addr) >> TARGET_PAGE_BITS) < ram_list.phys_dirty_size, ("%#RGp\n", (RTGCPHYS)(addr)), (rv));
+#  define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr) \
+    AssertMsgReturnVoid(((addr) >> TARGET_PAGE_BITS) < ram_list.phys_dirty_size, ("%#RGp\n", (RTGCPHYS)(addr)));
+# endif
+#else  /* !VBOX */
+# define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr,rv)   do {} while (0)
+# define VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr)     do {} while (0)
+#endif /* VBOX */
+    QLIST_HEAD(ram, RAMBlock) blocks;
+} RAMList;
+extern RAMList ram_list;
+
+extern const char *mem_path;
+extern int mem_prealloc;

 /* physical memory access */
…
 #define TLB_MMIO        (1 << 5)

-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
-                        uint8_t *buf, int len, int is_write);
-
 #define VGA_DIRTY_FLAG       0x01
 #define CODE_DIRTY_FLAG      0x02
…
 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
 {
-#ifdef VBOX
-    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
-    {
-        Log(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));
-        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));*/
-        return 0;
-    }
-#endif /* VBOX */
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
+    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0);
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
+}
+
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
+{
+    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0xff);
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
 }

…
                                                 int dirty_flags)
 {
-#ifdef VBOX
-    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
-    {
-        Log(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));
-        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));*/
-        return 0xff & dirty_flags; /** @todo I don't think this is the right thing to return, fix! */
-    }
-#endif /* VBOX */
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
-}
-
-static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
-{
-#ifdef VBOX
-    if (RT_UNLIKELY((addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size))
-    {
-        Log(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));
-        /*AssertMsgFailed(("cpu_physical_memory_is_dirty: %RGp\n", (RTGCPHYS)addr));*/
-        return;
-    }
-#endif /* VBOX */
-    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
+    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0xff & dirty_flags);
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
+}
+
+static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
+{
+    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(addr);
+    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
+}
+
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+                                                      int dirty_flags)
+{
+    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RET(addr, 0xff);
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+}
+
+static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
+                                                        int length,
+                                                        int dirty_flags)
+{
+    int i, mask, len;
+    uint8_t *p;
+
+    VBOX_RAMLIST_DIRTY_BOUNDS_CHECK_RETV(start);
+    len = length >> TARGET_PAGE_BITS;
+    mask = ~dirty_flags;
+    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
+    for (i = 0; i < len; i++) {
+        p[i] &= mask;
+    }
 }
…
 void dump_exec_info(FILE *f,
                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
-
-/* Coalesced MMIO regions are areas where write operations can be reordered.
- * This usually implies that write operations are side-effect free. This allows
- * batching which can make a major impact on performance when using
- * virtualization.
- */
-void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
-
-void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
-
-/*******************************************/
-/* host CPU ticks (if available) */
-
-#if defined(_ARCH_PPC)
-static inline int64_t cpu_get_real_ticks(void)
…
 (the whole chain of host cpu_get_real_ticks() implementations for _ARCH_PPC,
 __i386__, __x86_64__, __hppa__, __ia64, __s390__, sparc v8plus/v9, __mips__
 and the generic fallback is removed here, together with the CONFIG_PROFILER
 profile_getclock() helper and the qemu_time, qemu_time_start, tlb_flush_time
 and dev_time externs)
…
-#endif
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+                        uint8_t *buf, int len, int is_write);

 void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
trunk/src/recompiler/cpu-common.h
(r37675 → r37689)

 /* CPU interfaces that are target indpendent.  */

-#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
+#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
 #define WORDS_ALIGNED
 #endif

+#ifdef TARGET_PHYS_ADDR_BITS
+#include "targphys.h"
+#endif
+
+#ifndef NEED_CPU_H
+#include "poison.h"
+#endif
+
 #include "bswap.h"
+#include "qemu-queue.h"
+
+#if !defined(CONFIG_USER_ONLY)

 /* address in the RAM (different from a physical address) */
…
 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
-ram_addr_t qemu_ram_alloc(ram_addr_t);
+#ifndef VBOX
+ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
+                                   ram_addr_t size, void *host);
+ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
 void qemu_ram_free(ram_addr_t addr);
 /* This should only be used for ram local to a device. */
…
 /* This should not be used by devices. */
 ram_addr_t qemu_ram_addr_from_host(void *ptr);
+#endif /* !VBOX */

 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
…
 void cpu_unregister_map_client(void *cookie);

+struct CPUPhysMemoryClient;
+typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
+struct CPUPhysMemoryClient {
+    void (*set_memory)(struct CPUPhysMemoryClient *client,
+                       target_phys_addr_t start_addr,
+                       ram_addr_t size,
+                       ram_addr_t phys_offset);
+    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
+                             target_phys_addr_t start_addr,
+                             target_phys_addr_t end_addr);
+    int (*migration_log)(struct CPUPhysMemoryClient *client,
+                         int enable);
+    QLIST_ENTRY(CPUPhysMemoryClient) list;
+};
+
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
+
+/* Coalesced MMIO regions are areas where write operations can be reordered.
+ * This usually implies that write operations are side-effect free. This allows
+ * batching which can make a major impact on performance when using
+ * virtualization.
+ */
+void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
+
+void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
+
+void qemu_flush_coalesced_mmio_buffer(void);
+
 uint32_t ldub_phys(target_phys_addr_t addr);
 uint32_t lduw_phys(target_phys_addr_t addr);
…
 #define IO_MEM_ROMD        (1)
 #define IO_MEM_SUBPAGE     (2)
-#define IO_MEM_SUBWIDTH    (4)
+
+#endif

 #endif /* !CPU_COMMON_H */
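The new CPUPhysMemoryClient hook lets code outside exec.c observe physical-memory mapping changes. Below is a hypothetical sketch of a client that merely logs set_memory calls; only the struct layout and cpu_register_phys_memory_client() come from the header above, the callback names and log text are invented, and it assumes compilation inside the tree with cpu-common.h included.

#include <stdio.h>
#include "cpu-common.h"

static void demo_set_memory(CPUPhysMemoryClient *client,
                            target_phys_addr_t start_addr,
                            ram_addr_t size,
                            ram_addr_t phys_offset)
{
    printf("map 0x%llx +0x%llx -> 0x%llx\n",
           (unsigned long long)start_addr,
           (unsigned long long)size,
           (unsigned long long)phys_offset);
}

static int demo_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  target_phys_addr_t end_addr)
{
    return 0;   /* nothing to sync in this toy client */
}

static int demo_migration_log(CPUPhysMemoryClient *client, int enable)
{
    return 0;   /* ignore migration-log toggling */
}

static CPUPhysMemoryClient demo_client = {
    .set_memory        = demo_set_memory,
    .sync_dirty_bitmap = demo_sync_dirty_bitmap,
    .migration_log     = demo_migration_log,
};

/* during init:  cpu_register_phys_memory_client(&demo_client); */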
trunk/src/recompiler/cpu-defs.h
(r37675 → r37689)

 #ifndef VBOX
 #include <signal.h>
-#endif
+#else  /* VBOX */
+# define sig_atomic_t int32_t
+#endif /* VBOX */
 #include "osdep.h"
 #include "qemu-queue.h"
…
 #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

+#if !defined(CONFIG_USER_ONLY)
 #define CPU_TLB_BITS 8
 #define CPU_TLB_SIZE (1 << CPU_TLB_BITS)

-#if TARGET_PHYS_ADDR_BITS == 32 && TARGET_LONG_BITS == 32
+#if HOST_LONG_BITS == 32 && TARGET_LONG_BITS == 32
 #define CPU_TLB_ENTRY_BITS 4
 #else
…
     target_ulong addr_write;
     target_ulong addr_code;
-    /* Addend to virtual address to get physical address.  IO accesses
+    /* Addend to virtual address to get host address.  IO accesses
        use the corresponding iotlb value.  */
-#if TARGET_PHYS_ADDR_BITS == 64
-    /* on i386 Linux make sure it is aligned */
-    target_phys_addr_t addend __attribute__((aligned(8)));
-#else
-    target_phys_addr_t addend;
-#endif
+    unsigned long addend;
     /* padding to get a power of two size */
     uint8_t dummy[(1 << CPU_TLB_ENTRY_BITS) -
                   (sizeof(target_ulong) * 3 +
-                   ((-sizeof(target_ulong) * 3) & (sizeof(target_phys_addr_t) - 1)) +
-                   sizeof(target_phys_addr_t))];
+                   ((-sizeof(target_ulong) * 3) & (sizeof(unsigned long) - 1)) +
+                   sizeof(unsigned long))];
 } CPUTLBEntry;
+
+extern int CPUTLBEntry_wrong_size[sizeof(CPUTLBEntry) == (1 << CPU_TLB_ENTRY_BITS) ? 1 : -1];
+
+#define CPU_COMMON_TLB \
+    /* The meaning of the MMU modes is defined in the target code. */  \
+    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
+    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];               \
+    target_ulong tlb_flush_addr;                                        \
+    target_ulong tlb_flush_mask;
+
+#else
+
+#define CPU_COMMON_TLB
+
+#endif
+

 #ifdef HOST_WORDS_BIGENDIAN
…
 struct kvm_run;
 struct KVMState;
+struct qemu_work_item;

 typedef struct CPUBreakpoint {
…
                                  memory was accessed */                \
     uint32_t halted; /* Nonzero if the CPU is in suspend state */      \
-    uint32_t stop;   /* Stop request */                                \
-    uint32_t stopped; /* Artificially stopped */                       \
     uint32_t interrupt_request;                                        \
-    volatile /*sig_atomic_t - vbox*/ int32_t exit_request;             \
-    /* The meaning of the MMU modes is defined in the target code. */  \
-    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                 \
-    target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE];              \
-    /** addends for HVA -> GPA translations */                         \
-    VBOX_ONLY(target_phys_addr_t phys_addends[NB_MMU_MODES][CPU_TLB_SIZE]); \
+    volatile sig_atomic_t exit_request;                                \
+    CPU_COMMON_TLB                                                     \
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];          \
     /* buffer for temporaries in the code generator */                 \
…
                                                                        \
     uint32_t created;                                                  \
+    uint32_t stop;   /* Stop request */                                \
+    uint32_t stopped; /* Artificially stopped */                       \
     struct QemuThread *thread;                                         \
     struct QemuCond *halt_cond;                                        \
+    struct qemu_work_item *queued_work_first, *queued_work_last;       \
     const char *cpu_model_str;                                         \
     struct KVMState *kvm_state;                                        \
     struct kvm_run *kvm_run;                                           \
-    int kvm_fd;
-
-#endif
+    int kvm_fd;                                                        \
+    int kvm_vcpu_dirty;
+
+#endif
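The new CPUTLBEntry_wrong_size declaration is a C89-compatible compile-time assertion: the array type gets a negative size, and the build fails, as soon as the padded TLB entry no longer fills exactly 1 << CPU_TLB_ENTRY_BITS bytes. A hypothetical standalone illustration of the same idiom (struct and names invented):

#include <stdint.h>

struct tlb_like {
    uint64_t addr_read;
    uint64_t addr_write;
    uint64_t addr_code;
    uint64_t addend;       /* 4 * 8 bytes == 32 == 1 << 5 */
};

/* Declares fine while the size matches; becomes int[-1] (a compile error)
 * the moment a field is added without adjusting the padding/shift. */
extern int tlb_like_wrong_size[sizeof(struct tlb_like) == (1 << 5) ? 1 : -1];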
trunk/src/recompiler/cpu-exec.c
r37675 r37689 32 32 #include "tcg.h" 33 33 #include "kvm.h" 34 #include "qemu-barrier.h" 34 35 35 36 #if !defined(CONFIG_SOFTMMU) … … 67 68 void cpu_loop_exit(void) 68 69 { 69 /* NOTE: the register at this point must be saved by hand because 70 longjmp restore them */ 71 regs_to_env(); 70 env->current_tb = NULL; 72 71 longjmp(env->jmp_env, 1); 73 72 } … … 94 93 /* XXX: use siglongjmp ? */ 95 94 #ifdef __linux__ 95 #ifdef __ia64 96 sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL); 97 #else 96 98 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); 99 #endif 97 100 #elif defined(__OpenBSD__) 98 101 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL); … … 125 128 next_tb = tcg_qemu_tb_exec(tb->tc_ptr); 126 129 #endif 130 env->current_tb = NULL; 127 131 128 132 if ((next_tb & 3) == 2) { … … 141 145 TranslationBlock *tb, **ptb1; 142 146 unsigned int h; 143 target_ulong phys_pc, phys_page1, phys_page2, virt_page2; 147 tb_page_addr_t phys_pc, phys_page1, phys_page2; 148 target_ulong virt_page2; 144 149 145 150 tb_invalidated_flag = 0; 146 151 147 regs_to_env(); /* XXX: do it just before cpu_gen_code() */148 149 152 /* find translated block using physical mappings */ 150 phys_pc = get_p hys_addr_code(env, pc);153 phys_pc = get_page_addr_code(env, pc); 151 154 phys_page1 = phys_pc & TARGET_PAGE_MASK; 152 155 phys_page2 = -1; … … 165 168 virt_page2 = (pc & TARGET_PAGE_MASK) + 166 169 TARGET_PAGE_SIZE; 167 phys_page2 = get_p hys_addr_code(env, virt_page2);170 phys_page2 = get_page_addr_code(env, virt_page2); 168 171 if (tb->page_addr[1] == phys_page2) 169 172 goto found; … … 226 229 /* main execution loop */ 227 230 228 #ifdef VBOX 231 volatile sig_atomic_t exit_request; 229 232 230 233 int cpu_exec(CPUState *env1) 231 234 { 232 #define DECLARE_HOST_REGS 1 233 #include "hostregs_helper.h" 234 int ret = 0, interrupt_request; 235 volatile host_reg_t saved_env_reg; 236 int ret VBOX_ONLY(= 0), interrupt_request; 235 237 TranslationBlock *tb; 236 238 uint8_t *tc_ptr; 239 #ifndef VBOX 240 uintptr_t next_tb; 241 #else /* VBOX */ 237 242 unsigned long next_tb; 238 239 cpu_single_env = env1; 240 241 /* first we save global registers */ 242 #define SAVE_HOST_REGS 1 243 #include "hostregs_helper.h" 244 env = env1; 245 246 env_to_regs(); 247 #if defined(TARGET_I386) 248 /* put eflags in CPU temporary format */ 249 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 250 DF = 1 - (2 * ((env->eflags >> 10) & 1)); 251 CC_OP = CC_OP_EFLAGS; 252 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 253 #elif defined(TARGET_SPARC) 254 #elif defined(TARGET_M68K) 255 env->cc_op = CC_OP_FLAGS; 256 env->cc_dest = env->sr & 0xf; 257 env->cc_x = (env->sr >> 4) & 1; 258 #elif defined(TARGET_ALPHA) 259 #elif defined(TARGET_ARM) 260 #elif defined(TARGET_PPC) 261 #elif defined(TARGET_MIPS) 262 #elif defined(TARGET_SH4) 263 #elif defined(TARGET_CRIS) 264 /* XXXXX */ 265 #else 266 #error unsupported target CPU 267 #endif 268 #ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. 
*/ 269 env->exception_index = -1; 270 #endif 271 272 /* prepare setjmp context for exception handling */ 273 for(;;) { 274 if (setjmp(env->jmp_env) == 0) 275 { 276 env->current_tb = NULL; 277 278 /* 279 * Check for fatal errors first 280 */ 281 if (env->interrupt_request & CPU_INTERRUPT_RC) { 282 env->exception_index = EXCP_RC; 283 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC); 284 ret = env->exception_index; 285 cpu_loop_exit(); 286 } 287 288 /* if an exception is pending, we execute it here */ 289 if (env->exception_index >= 0) { 290 if (env->exception_index >= EXCP_INTERRUPT) { 291 /* exit request from the cpu execution loop */ 292 ret = env->exception_index; 293 if (ret == EXCP_DEBUG) 294 cpu_handle_debug_exception(env); 295 break; 296 } else { 297 /* simulate a real cpu exception. On i386, it can 298 trigger new exceptions, but we do not handle 299 double or triple faults yet. */ 300 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING); 301 Log(("do_interrupt %d %d %RGv\n", env->exception_index, env->exception_is_int, (RTGCPTR)env->exception_next_eip)); 302 do_interrupt(env->exception_index, 303 env->exception_is_int, 304 env->error_code, 305 env->exception_next_eip, 0); 306 /* successfully delivered */ 307 env->old_exception = -1; 308 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING); 309 } 310 env->exception_index = -1; 311 } 312 313 next_tb = 0; /* force lookup of first TB */ 314 for(;;) 315 { 316 interrupt_request = env->interrupt_request; 317 if (unlikely(interrupt_request)) { 318 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) { 319 /* Mask out external interrupts for this step. */ 320 interrupt_request &= ~(CPU_INTERRUPT_HARD | 321 CPU_INTERRUPT_FIQ | 322 CPU_INTERRUPT_SMI | 323 CPU_INTERRUPT_NMI); 324 } 325 if (interrupt_request & CPU_INTERRUPT_DEBUG) { 326 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG; 327 env->exception_index = EXCP_DEBUG; 328 cpu_loop_exit(); 329 } 330 /** @todo: reconcile with what QEMU really does */ 331 332 /* Single instruction exec request, we execute it and return (one way or the other). 333 The caller will always reschedule after doing this operation! */ 334 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR) 335 { 336 /* not in flight are we? (if we are, we trapped) */ 337 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT)) 338 { 339 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT); 340 env->exception_index = EXCP_SINGLE_INSTR; 341 if (emulate_single_instr(env) == -1) 342 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip)); 343 344 /* When we receive an external interrupt during execution of this single 345 instruction, then we should stay here. We will leave when we're ready 346 for raw-mode or when interrupted by pending EMT requests. */ 347 interrupt_request = env->interrupt_request; /* reload this! */ 348 if ( !(interrupt_request & CPU_INTERRUPT_HARD) 349 || !(env->eflags & IF_MASK) 350 || (env->hflags & HF_INHIBIT_IRQ_MASK) 351 || (env->state & CPU_RAW_HWACC) 352 ) 353 { 354 env->exception_index = ret = EXCP_SINGLE_INSTR; 355 cpu_loop_exit(); 356 } 357 } 358 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. 
*/ 359 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR); 360 #ifdef IEM_VERIFICATION_MODE 361 env->exception_index = ret = EXCP_SINGLE_INSTR; 362 cpu_loop_exit(); 363 #endif 364 } 365 366 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING); 367 if ((interrupt_request & CPU_INTERRUPT_SMI) && 368 !(env->hflags & HF_SMM_MASK)) { 369 env->interrupt_request &= ~CPU_INTERRUPT_SMI; 370 do_smm_enter(); 371 next_tb = 0; 372 } 373 else if ((interrupt_request & CPU_INTERRUPT_HARD) && 374 (env->eflags & IF_MASK) && 375 !(env->hflags & HF_INHIBIT_IRQ_MASK)) 376 { 377 /* if hardware interrupt pending, we execute it */ 378 int intno; 379 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD); 380 intno = cpu_get_pic_interrupt(env); 381 if (intno >= 0) 382 { 383 Log(("do_interrupt %d\n", intno)); 384 do_interrupt(intno, 0, 0, 0, 1); 385 } 386 /* ensure that no TB jump will be modified as 387 the program flow was changed */ 388 next_tb = 0; 389 } 390 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) 391 { 392 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB); 393 /* ensure that no TB jump will be modified as 394 the program flow was changed */ 395 next_tb = 0; 396 } 397 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING); 398 if (interrupt_request & CPU_INTERRUPT_RC) 399 { 400 env->exception_index = EXCP_RC; 401 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC); 402 ret = env->exception_index; 403 cpu_loop_exit(); 404 } 405 } 406 if (unlikely(env->exit_request)) { 407 env->exit_request = 0; 408 env->exception_index = EXCP_INTERRUPT; 409 cpu_loop_exit(); 410 } 411 412 /* 413 * Check if we the CPU state allows us to execute the code in raw-mode. 414 */ 415 RAWEx_ProfileStart(env, STATS_RAW_CHECK); 416 if (remR3CanExecuteRaw(env, 417 env->eip + env->segs[R_CS].base, 418 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)), 419 &env->exception_index)) 420 { 421 RAWEx_ProfileStop(env, STATS_RAW_CHECK); 422 ret = env->exception_index; 423 cpu_loop_exit(); 424 } 425 RAWEx_ProfileStop(env, STATS_RAW_CHECK); 426 427 { 428 RTGCPTR mypc = env->eip + env->segs[R_CS].base; 429 if (mypc == 0x00fe0d2 || mypc == 0x00f19e9 || mypc == 0x000f0827 || mypc == 0x000fe090) { 430 RTLogFlags(NULL, "enabled"); 431 loglevel = ~0; 432 Log(("BANG CRASH!\n")); 433 } 434 } 435 #ifdef CONFIG_DEBUG_EXEC 436 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { 437 /* restore flags in standard format */ 438 regs_to_env(); 439 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK); 440 log_cpu_state(env, X86_DUMP_CCOP); 441 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 442 } 443 #endif 444 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP); 445 spin_lock(&tb_lock); 446 tb = tb_find_fast(); 447 /* Note: we do it here to avoid a gcc bug on Mac OS X when 448 doing it in tb_find_slow */ 449 if (tb_invalidated_flag) { 450 /* as some TB could have been invalidated because 451 of memory exceptions while generating the code, we 452 must recompute the hash index here */ 453 next_tb = 0; 454 tb_invalidated_flag = 0; 455 } 456 #ifdef CONFIG_DEBUG_EXEC 457 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s [sp=%RGv, bp=%RGv\n", 458 (long)tb->tc_ptr, tb->pc, lookup_symbol(tb->pc), (RTGCPTR)env->regs[R_ESP], (RTGCPTR)env->regs[R_EBP]); 459 #endif 460 461 462 /* see if we can patch the calling TB. When the TB 463 spans two pages, we cannot safely do a direct 464 jump. 
*/ 465 if (next_tb != 0 466 && !(tb->cflags & CF_RAW_MODE) 467 && tb->page_addr[1] == -1) 468 { 469 tb_add_jump((TranslationBlock *)(long)(next_tb & ~3), next_tb & 3, tb); 470 } 471 spin_unlock(&tb_lock); 472 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP); 473 474 env->current_tb = tb; 475 476 /* cpu_interrupt might be called while translating the 477 TB, but before it is linked into a potentially 478 infinite loop and becomes env->current_tb. Avoid 479 starting execution if there is a pending interrupt. */ 480 if (unlikely (env->exit_request)) 481 env->current_tb = NULL; 482 483 while (env->current_tb) { 484 tc_ptr = tb->tc_ptr; 485 /* execute the generated code */ 486 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE); 487 #if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM) 488 tcg_qemu_tb_exec(tc_ptr, next_tb); 489 #else 490 next_tb = tcg_qemu_tb_exec(tc_ptr); 491 #endif 492 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE); 493 env->current_tb = NULL; 494 if ((next_tb & 3) == 2) { 495 /* Instruction counter expired. */ 496 int insns_left; 497 tb = (TranslationBlock *)(long)(next_tb & ~3); 498 /* Restore PC. */ 499 cpu_pc_from_tb(env, tb); 500 insns_left = env->icount_decr.u32; 501 if (env->icount_extra && insns_left >= 0) { 502 /* Refill decrementer and continue execution. */ 503 env->icount_extra += insns_left; 504 if (env->icount_extra > 0xffff) { 505 insns_left = 0xffff; 506 } else { 507 insns_left = env->icount_extra; 508 } 509 env->icount_extra -= insns_left; 510 env->icount_decr.u16.low = insns_left; 511 } else { 512 if (insns_left > 0) { 513 /* Execute remaining instructions. */ 514 cpu_exec_nocache(insns_left, tb); 515 } 516 env->exception_index = EXCP_INTERRUPT; 517 next_tb = 0; 518 cpu_loop_exit(); 519 } 520 } 521 } 522 523 /* reset soft MMU for next block (it can currently 524 only be set by a memory fault) */ 525 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU) 526 if (env->hflags & HF_SOFTMMU_MASK) { 527 env->hflags &= ~HF_SOFTMMU_MASK; 528 /* do not allow linking to another block */ 529 next_tb = 0; 530 } 531 #endif 532 } /* for(;;) */ 533 } else { 534 env_to_regs(); 535 } 536 #ifdef VBOX_HIGH_RES_TIMERS_HACK 537 /* NULL the current_tb here so cpu_interrupt() doesn't do anything 538 unnecessary (like crashing during emulate single instruction). 539 Note! Don't use env1->pVM here, the code wouldn't run with 540 gcc-4.4/amd64 anymore, see #3883. 
*/ 541 env->current_tb = NULL; 542 if ( !(env->interrupt_request & ( CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC 543 | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT)) 544 && ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER) 545 || TMTimerPollBool(env->pVM, env->pVCpu)) ) { 546 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER); 547 remR3ProfileStart(STATS_QEMU_RUN_TIMERS); 548 TMR3TimerQueuesDo(env->pVM); 549 remR3ProfileStop(STATS_QEMU_RUN_TIMERS); 550 } 551 #endif 552 } /* for(;;) */ 553 554 #if defined(TARGET_I386) 555 /* restore flags in standard format */ 556 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK); 557 #else 558 #error unsupported target CPU 559 #endif 560 #include "hostregs_helper.h" 561 return ret; 562 } 563 564 #else /* !VBOX */ 565 int cpu_exec(CPUState *env1) 566 { 567 #define DECLARE_HOST_REGS 1 568 #include "hostregs_helper.h" 569 int ret, interrupt_request; 570 TranslationBlock *tb; 571 uint8_t *tc_ptr; 572 unsigned long next_tb; 573 243 #endif /* VBOX */ 244 245 # ifndef VBOX 574 246 if (cpu_halted(env1) == EXCP_HALTED) 575 247 return EXCP_HALTED; 248 # endif /* !VBOX */ 576 249 577 250 cpu_single_env = env1; 578 251 579 /* first we save global registers */ 580 #define SAVE_HOST_REGS 1 581 #include "hostregs_helper.h" 252 /* the access to env below is actually saving the global register's 253 value, so that files not including target-xyz/exec.h are free to 254 use it. */ 255 QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env)); 256 saved_env_reg = (host_reg_t) env; 257 barrier(); 582 258 env = env1; 583 259 584 env_to_regs(); 260 if (unlikely(exit_request)) { 261 env->exit_request = 1; 262 } 263 585 264 #if defined(TARGET_I386) 586 265 if (!kvm_enabled()) { … … 608 287 #error unsupported target CPU 609 288 #endif 289 #ifndef VBOX /* VBOX: We need to raise traps and suchlike from the outside. */ 610 290 env->exception_index = -1; 291 #endif /* !VBOX */ 611 292 612 293 /* prepare setjmp context for exception handling */ … … 618 299 #define env cpu_single_env 619 300 #endif 620 env->current_tb = NULL; 301 #ifdef VBOX 302 env->current_tb = NULL; /* probably not needed, but whatever... */ 303 304 /* 305 * Check for fatal errors first 306 */ 307 if (env->interrupt_request & CPU_INTERRUPT_RC) { 308 env->exception_index = EXCP_RC; 309 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC); 310 ret = env->exception_index; 311 cpu_loop_exit(); 312 } 313 #endif 314 621 315 /* if an exception is pending, we execute it here */ 622 316 if (env->exception_index >= 0) { … … 624 318 /* exit request from the cpu execution loop */ 625 319 ret = env->exception_index; 320 #ifdef VBOX /* because of the above stuff */ 321 env->exception_index = -1; 322 #endif 626 323 if (ret == EXCP_DEBUG) 627 324 cpu_handle_debug_exception(env); … … 647 344 trigger new exceptions, but we do not handle 648 345 double or triple faults yet. 
*/ 346 # ifdef VBOX 347 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING); 348 Log(("do_interrupt: vec=%#x int=%d pc=%04x:%RGv\n", env->exception_index, env->exception_is_int, 349 env->segs[R_CS].selector, (RTGCPTR)env->exception_next_eip)); 350 # endif /* VBOX */ 649 351 do_interrupt(env->exception_index, 650 352 env->exception_is_int, … … 653 355 /* successfully delivered */ 654 356 env->old_exception = -1; 357 # ifdef VBOX 358 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING); 359 # endif /* VBOX */ 655 360 #elif defined(TARGET_PPC) 656 361 do_interrupt(env); … … 664 369 do_interrupt(env); 665 370 #elif defined(TARGET_SH4) 666 371 do_interrupt(env); 667 372 #elif defined(TARGET_ALPHA) 668 373 do_interrupt(env); … … 672 377 do_interrupt(0); 673 378 #endif 379 env->exception_index = -1; 674 380 #endif 675 381 } 676 env->exception_index = -1;677 382 } 678 383 384 # ifndef VBOX 679 385 if (kvm_enabled()) { 680 386 kvm_cpu_exec(env); 681 387 longjmp(env->jmp_env, 1); 682 388 } 389 # endif /* !VBOX */ 683 390 684 391 next_tb = 0; /* force lookup of first TB */ … … 709 416 #endif 710 417 #if defined(TARGET_I386) 418 # ifdef VBOX 419 /* Single instruction exec request, we execute it and return (one way or the other). 420 The caller will always reschedule after doing this operation! */ 421 if (interrupt_request & CPU_INTERRUPT_SINGLE_INSTR) 422 { 423 /* not in flight are we? (if we are, we trapped) */ 424 if (!(env->interrupt_request & CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT)) 425 { 426 ASMAtomicOrS32((int32_t volatile *)&env->interrupt_request, CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT); 427 env->exception_index = EXCP_SINGLE_INSTR; 428 if (emulate_single_instr(env) == -1) 429 AssertMsgFailed(("REM: emulate_single_instr failed for EIP=%RGv!!\n", (RTGCPTR)env->eip)); 430 431 /* When we receive an external interrupt during execution of this single 432 instruction, then we should stay here. We will leave when we're ready 433 for raw-mode or when interrupted by pending EMT requests. */ 434 interrupt_request = env->interrupt_request; /* reload this! */ 435 if ( !(interrupt_request & CPU_INTERRUPT_HARD) 436 || !(env->eflags & IF_MASK) 437 || (env->hflags & HF_INHIBIT_IRQ_MASK) 438 || (env->state & CPU_RAW_HWACC) 439 ) 440 { 441 env->exception_index = ret = EXCP_SINGLE_INSTR; 442 cpu_loop_exit(); 443 } 444 } 445 /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */ 446 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR); 447 # ifdef IEM_VERIFICATION_MODE 448 env->exception_index = ret = EXCP_SINGLE_INSTR; 449 cpu_loop_exit(); 450 # endif 451 } 452 # endif /* VBOX */ 453 454 # ifndef VBOX /** @todo reconcile our code with the following... 
*/ 711 455 if (interrupt_request & CPU_INTERRUPT_INIT) { 712 456 svm_check_intercept(SVM_EXIT_INIT); … … 768 512 } 769 513 } 514 # else /* VBOX */ 515 RAWEx_ProfileStart(env, STATS_IRQ_HANDLING); 516 if ((interrupt_request & CPU_INTERRUPT_SMI) && 517 !(env->hflags & HF_SMM_MASK)) { 518 env->interrupt_request &= ~CPU_INTERRUPT_SMI; 519 do_smm_enter(); 520 next_tb = 0; 521 } 522 else if ((interrupt_request & CPU_INTERRUPT_HARD) && 523 (env->eflags & IF_MASK) && 524 !(env->hflags & HF_INHIBIT_IRQ_MASK)) 525 { 526 /* if hardware interrupt pending, we execute it */ 527 int intno; 528 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_HARD); 529 intno = cpu_get_pic_interrupt(env); 530 if (intno >= 0) 531 { 532 Log(("do_interrupt %d\n", intno)); 533 do_interrupt(intno, 0, 0, 0, 1); 534 } 535 /* ensure that no TB jump will be modified as 536 the program flow was changed */ 537 next_tb = 0; 538 } 539 # endif /* VBOX */ 770 540 #elif defined(TARGET_PPC) 771 541 #if 0 … … 803 573 } 804 574 #elif defined(TARGET_SPARC) 805 if ( (interrupt_request & CPU_INTERRUPT_HARD) &&806 cpu_interrupts_enabled(env)) { 807 int pil = env->interrupt_index & 15; 808 int type = env->interrupt_index & 0xf0;809 810 if (((type == TT_EXTINT) && 811 (pil == 15 || pil > env->psrpil)) || 812 type != TT_EXTINT) { 813 env->interrupt_request &= ~CPU_INTERRUPT_HARD; 814 env->exception_index = env->interrupt_index;815 do_interrupt(env);816 env->interrupt_index= 0;817 next_tb = 0;818 575 if (interrupt_request & CPU_INTERRUPT_HARD) { 576 if (cpu_interrupts_enabled(env) && 577 env->interrupt_index > 0) { 578 int pil = env->interrupt_index & 0xf; 579 int type = env->interrupt_index & 0xf0; 580 581 if (((type == TT_EXTINT) && 582 cpu_pil_allowed(env, pil)) || 583 type != TT_EXTINT) { 584 env->exception_index = env->interrupt_index; 585 do_interrupt(env); 586 next_tb = 0; 587 } 588 } 819 589 } else if (interrupt_request & CPU_INTERRUPT_TIMER) { 820 590 //do_interrupt(0, 0, 0, 0, 0); … … 856 626 #elif defined(TARGET_CRIS) 857 627 if (interrupt_request & CPU_INTERRUPT_HARD 858 && (env->pregs[PR_CCS] & I_FLAG)) { 628 && (env->pregs[PR_CCS] & I_FLAG) 629 && !env->locked_irq) { 859 630 env->exception_index = EXCP_IRQ; 860 631 do_interrupt(env); … … 884 655 do_interrupt may have updated the EXITTB flag. */ 885 656 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) { 657 #ifndef VBOX 886 658 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB; 659 #else /* VBOX */ 660 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXITTB); 661 #endif /* VBOX */ 887 662 /* ensure that no TB jump will be modified as 888 663 the program flow was changed */ 889 664 next_tb = 0; 890 665 } 666 #ifdef VBOX 667 RAWEx_ProfileStop(env, STATS_IRQ_HANDLING); 668 if (interrupt_request & CPU_INTERRUPT_RC) { 669 env->exception_index = EXCP_RC; 670 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_RC); 671 ret = env->exception_index; 672 cpu_loop_exit(); 673 } 674 if (interrupt_request & (CPU_INTERRUPT_EXTERNAL_EXIT)) { 675 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~(CPU_INTERRUPT_EXTERNAL_EXIT)); 676 env->exit_request = 1; 677 } 678 #endif 891 679 } 892 680 if (unlikely(env->exit_request)) { … … 895 683 cpu_loop_exit(); 896 684 } 897 #ifdef CONFIG_DEBUG_EXEC 685 686 #ifdef VBOX 687 /* 688 * Check if we the CPU state allows us to execute the code in raw-mode. 
689 */ 690 RAWEx_ProfileStart(env, STATS_RAW_CHECK); 691 if (remR3CanExecuteRaw(env, 692 env->eip + env->segs[R_CS].base, 693 env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK)), 694 &env->exception_index)) 695 { 696 RAWEx_ProfileStop(env, STATS_RAW_CHECK); 697 ret = env->exception_index; 698 cpu_loop_exit(); 699 } 700 RAWEx_ProfileStop(env, STATS_RAW_CHECK); 701 #endif /* VBOX */ 702 703 #if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC) 898 704 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) { 899 705 /* restore flags in standard format */ 900 regs_to_env();901 706 #if defined(TARGET_I386) 902 707 env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK); 903 708 log_cpu_state(env, X86_DUMP_CCOP); 904 709 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); 905 #elif defined(TARGET_ARM)906 log_cpu_state(env, 0);907 #elif defined(TARGET_SPARC)908 log_cpu_state(env, 0);909 #elif defined(TARGET_PPC)910 log_cpu_state(env, 0);911 710 #elif defined(TARGET_M68K) 912 711 cpu_m68k_flush_flags(env, env->cc_op); … … 915 714 | env->cc_dest | (env->cc_x << 4); 916 715 log_cpu_state(env, 0); 917 #el if defined(TARGET_MICROBLAZE)716 #else 918 717 log_cpu_state(env, 0); 919 #elif defined(TARGET_MIPS)920 log_cpu_state(env, 0);921 #elif defined(TARGET_SH4)922 log_cpu_state(env, 0);923 #elif defined(TARGET_ALPHA)924 log_cpu_state(env, 0);925 #elif defined(TARGET_CRIS)926 log_cpu_state(env, 0);927 #else928 #error unsupported target CPU929 718 #endif 930 719 } 931 #endif 720 #endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */ 721 #ifdef VBOX 722 RAWEx_ProfileStart(env, STATS_TLB_LOOKUP); 723 #endif /*VBOX*/ 932 724 spin_lock(&tb_lock); 933 725 tb = tb_find_fast(); … … 949 741 spans two pages, we cannot safely do a direct 950 742 jump. */ 951 { 952 if (next_tb != 0 && tb->page_addr[1] == -1) { 743 #ifndef VBOX 744 if (next_tb != 0 && tb->page_addr[1] == -1) { 745 #else /* VBOX */ 746 if (next_tb != 0 && !(tb->cflags & CF_RAW_MODE) && tb->page_addr[1] == -1) { 747 #endif /* VBOX */ 953 748 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb); 954 749 } 955 }956 750 spin_unlock(&tb_lock); 957 env->current_tb = tb; 751 #ifdef VBOX 752 RAWEx_ProfileStop(env, STATS_TLB_LOOKUP); 753 #endif 958 754 959 755 /* cpu_interrupt might be called while translating the … … 961 757 infinite loop and becomes env->current_tb. Avoid 962 758 starting execution if there is a pending interrupt. */ 963 if (unlikely (env->exit_request)) 964 env->current_tb = NULL; 965 966 while (env->current_tb) { 759 env->current_tb = tb; 760 barrier(); 761 if (likely(!env->exit_request)) { 967 762 tc_ptr = tb->tc_ptr; 968 763 /* execute the generated code */ 764 #ifdef VBOX 765 RAWEx_ProfileStart(env, STATS_QEMU_RUN_EMULATED_CODE); 766 #endif 969 767 #if defined(__sparc__) && !defined(CONFIG_SOLARIS) 970 768 #undef env … … 972 770 #define env cpu_single_env 973 771 #endif 772 #if defined(VBOX) && defined(GCC_WITH_BUGGY_REGPARM) 773 tcg_qemu_tb_exec(tc_ptr, next_tb); 774 #else 974 775 next_tb = tcg_qemu_tb_exec(tc_ptr); 975 env->current_tb = NULL; 776 #endif 777 #ifdef VBOX 778 RAWEx_ProfileStop(env, STATS_QEMU_RUN_EMULATED_CODE); 779 #endif 976 780 if ((next_tb & 3) == 2) { 977 781 /* Instruction counter expired. 
*/ … … 1002 806 } 1003 807 } 808 env->current_tb = NULL; 1004 809 /* reset soft MMU for next block (it can currently 1005 810 only be set by a memory fault) */ 1006 811 } /* for(;;) */ 1007 } else {1008 env_to_regs();1009 812 } 813 #ifdef VBOX_HIGH_RES_TIMERS_HACK 814 /* NULL the current_tb here so cpu_interrupt() doesn't do anything 815 unnecessary (like crashing during emulate single instruction). 816 Note! Don't use env1->pVM here, the code wouldn't run with 817 gcc-4.4/amd64 anymore, see #3883. */ 818 env->current_tb = NULL; 819 if ( !(env->interrupt_request & ( CPU_INTERRUPT_DEBUG | CPU_INTERRUPT_EXTERNAL_EXIT | CPU_INTERRUPT_RC 820 | CPU_INTERRUPT_SINGLE_INSTR | CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT)) 821 && ( (env->interrupt_request & CPU_INTERRUPT_EXTERNAL_TIMER) 822 || TMTimerPollBool(env->pVM, env->pVCpu)) ) { 823 ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_EXTERNAL_TIMER); 824 remR3ProfileStart(STATS_QEMU_RUN_TIMERS); 825 TMR3TimerQueuesDo(env->pVM); 826 remR3ProfileStop(STATS_QEMU_RUN_TIMERS); 827 } 828 #endif 1010 829 } /* for(;;) */ 1011 830 … … 1035 854 1036 855 /* restore global registers */ 1037 #include "hostregs_helper.h" 1038 856 barrier(); 857 env = (void *) saved_env_reg; 858 859 # ifndef VBOX /* we might be using elsewhere, we only have one. */ 1039 860 /* fail safe : never use cpu_single_env outside cpu_exec() */ 1040 861 cpu_single_env = NULL; 862 # endif 1041 863 return ret; 1042 864 } 1043 1044 #endif /* !VBOX */1045 865 1046 866 /* must only be called from the generated code as an exception can be … … 1288 1108 # define TRAP_sig(context) REG_sig(trap, context) 1289 1109 #endif /* linux */ 1110 1111 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 1112 #include <ucontext.h> 1113 # define IAR_sig(context) ((context)->uc_mcontext.mc_srr0) 1114 # define MSR_sig(context) ((context)->uc_mcontext.mc_srr1) 1115 # define CTR_sig(context) ((context)->uc_mcontext.mc_ctr) 1116 # define XER_sig(context) ((context)->uc_mcontext.mc_xer) 1117 # define LR_sig(context) ((context)->uc_mcontext.mc_lr) 1118 # define CR_sig(context) ((context)->uc_mcontext.mc_cr) 1119 /* Exception Registers access */ 1120 # define DAR_sig(context) ((context)->uc_mcontext.mc_dar) 1121 # define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr) 1122 # define TRAP_sig(context) ((context)->uc_mcontext.mc_exc) 1123 #endif /* __FreeBSD__|| __FreeBSD_kernel__ */ 1290 1124 1291 1125 #ifdef __APPLE__ … … 1318 1152 { 1319 1153 siginfo_t *info = pinfo; 1154 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 1155 ucontext_t *uc = puc; 1156 #else 1320 1157 struct ucontext *uc = puc; 1158 #endif 1321 1159 unsigned long pc; 1322 1160 int is_write; … … 1494 1332 return handle_cpu_signal(ip, (unsigned long)info->si_addr, 1495 1333 is_write, 1496 &uc->uc_sigmask, puc);1334 (sigset_t *)&uc->uc_sigmask, puc); 1497 1335 } 1498 1336 … … 1505 1343 struct ucontext *uc = puc; 1506 1344 unsigned long pc; 1345 uint16_t *pinsn; 1346 int is_write = 0; 1347 1348 pc = uc->uc_mcontext.psw.addr; 1349 1350 /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead 1351 of the normal 2 arguments. The 3rd argument contains the "int_code" 1352 from the hardware which does in fact contain the is_write value. 1353 The rt signal handler, as far as I can tell, does not give this value 1354 at all. Not that we could get to it from here even if it were. */ 1355 /* ??? This is not even close to complete, since it ignores all 1356 of the read-modify-write instructions. 
*/ 1357 pinsn = (uint16_t *)pc; 1358 switch (pinsn[0] >> 8) { 1359 case 0x50: /* ST */ 1360 case 0x42: /* STC */ 1361 case 0x40: /* STH */ 1362 is_write = 1; 1363 break; 1364 case 0xc4: /* RIL format insns */ 1365 switch (pinsn[0] & 0xf) { 1366 case 0xf: /* STRL */ 1367 case 0xb: /* STGRL */ 1368 case 0x7: /* STHRL */ 1369 is_write = 1; 1370 } 1371 break; 1372 case 0xe3: /* RXY format insns */ 1373 switch (pinsn[2] & 0xff) { 1374 case 0x50: /* STY */ 1375 case 0x24: /* STG */ 1376 case 0x72: /* STCY */ 1377 case 0x70: /* STHY */ 1378 case 0x8e: /* STPQ */ 1379 case 0x3f: /* STRVH */ 1380 case 0x3e: /* STRV */ 1381 case 0x2f: /* STRVG */ 1382 is_write = 1; 1383 } 1384 break; 1385 } 1386 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1387 is_write, &uc->uc_sigmask, puc); 1388 } 1389 1390 #elif defined(__mips__) 1391 1392 int cpu_signal_handler(int host_signum, void *pinfo, 1393 void *puc) 1394 { 1395 siginfo_t *info = pinfo; 1396 struct ucontext *uc = puc; 1397 greg_t pc = uc->uc_mcontext.pc; 1507 1398 int is_write; 1508 1399 1509 pc = uc->uc_mcontext.psw.addr;1510 1400 /* XXX: compute is_write */ 1511 1401 is_write = 0; … … 1514 1404 } 1515 1405 1516 #elif defined(__ mips__)1406 #elif defined(__hppa__) 1517 1407 1518 1408 int cpu_signal_handler(int host_signum, void *pinfo, 1519 1409 void *puc) 1520 1410 { 1521 s iginfo_t*info = pinfo;1411 struct siginfo *info = pinfo; 1522 1412 struct ucontext *uc = puc; 1523 greg_t pc = uc->uc_mcontext.pc; 1524 int is_write; 1525 1526 /* XXX: compute is_write */ 1527 is_write = 0; 1413 unsigned long pc = uc->uc_mcontext.sc_iaoq[0]; 1414 uint32_t insn = *(uint32_t *)pc; 1415 int is_write = 0; 1416 1417 /* XXX: need kernel patch to get write flag faster. */ 1418 switch (insn >> 26) { 1419 case 0x1a: /* STW */ 1420 case 0x19: /* STH */ 1421 case 0x18: /* STB */ 1422 case 0x1b: /* STWM */ 1423 is_write = 1; 1424 break; 1425 1426 case 0x09: /* CSTWX, FSTWX, FSTWS */ 1427 case 0x0b: /* CSTDX, FSTDX, FSTDS */ 1428 /* Distinguish from coprocessor load ... */ 1429 is_write = (insn >> 9) & 1; 1430 break; 1431 1432 case 0x03: 1433 switch ((insn >> 6) & 15) { 1434 case 0xa: /* STWS */ 1435 case 0x9: /* STHS */ 1436 case 0x8: /* STBS */ 1437 case 0xe: /* STWAS */ 1438 case 0xc: /* STBYS */ 1439 is_write = 1; 1440 } 1441 break; 1442 } 1443 1528 1444 return handle_cpu_signal(pc, (unsigned long)info->si_addr, 1529 1445 is_write, &uc->uc_sigmask, puc); 1530 1446 } 1531 1447 1532 #elif defined(__hppa__)1533 1534 int cpu_signal_handler(int host_signum, void *pinfo,1535 void *puc)1536 {1537 struct siginfo *info = pinfo;1538 struct ucontext *uc = puc;1539 unsigned long pc;1540 int is_write;1541 1542 pc = uc->uc_mcontext.sc_iaoq[0];1543 /* FIXME: compute is_write */1544 is_write = 0;1545 return handle_cpu_signal(pc, (unsigned long)info->si_addr,1546 is_write,1547 &uc->uc_sigmask, puc);1548 }1549 1550 1448 #else 1551 1449 -
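Note on the cpu-exec.c hunk above: block chaining still goes through tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb), i.e. the low two bits of next_tb carry the jump-slot index while the remaining bits are the TB pointer. The standalone sketch below only illustrates that tagging trick; FakeTB and pack_next_tb are made-up names, not types or functions from the recompiler.

    /* Illustrative only: how a pointer and a 2-bit jump-slot index can share
       one word, as the (next_tb & ~3) / (next_tb & 3) split above suggests. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct FakeTB {
        int id;
    } FakeTB;

    /* Pack a TB pointer and a jump-slot index (0..3) into one word.
       Requires the pointer to be at least 4-byte aligned. */
    static uintptr_t pack_next_tb(FakeTB *tb, unsigned slot)
    {
        assert(((uintptr_t)tb & 3) == 0 && slot < 4);
        return (uintptr_t)tb | slot;
    }

    int main(void)
    {
        static FakeTB tb = { 42 };          /* statics are suitably aligned */
        uintptr_t next_tb = pack_next_tb(&tb, 2);

        FakeTB  *prev = (FakeTB *)(next_tb & ~(uintptr_t)3); /* recover pointer */
        unsigned slot = (unsigned)(next_tb & 3);             /* recover slot    */

        printf("tb id=%d, jump slot=%u\n", prev->id, slot);
        return 0;
    }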
trunk/src/recompiler/cutils.c
r37675 r37689 612 612 613 613 #ifndef VBOX 614 614 615 /* 615 616 * Make sure data goes on disk, but if possible do not bother to … … 731 732 } 732 733 734 #ifndef _WIN32 735 /* Sets a specific flag */ 736 int fcntl_setfl(int fd, int flag) 737 { 738 int flags; 739 740 flags = fcntl(fd, F_GETFL); 741 if (flags == -1) 742 return -errno; 743 744 if (fcntl(fd, F_SETFL, flags | flag) == -1) 745 return -errno; 746 747 return 0; 748 } 749 #endif 750 733 751 #endif /* !VBOX */ 752 -
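Note: cutils.c gains an fcntl_setfl() helper for non-Windows builds, which ORs a flag into a descriptor's existing F_GETFL flags. A minimal POSIX usage sketch follows; the pipe is just a convenient descriptor for the example, and the helper body is repeated so the snippet compiles on its own.

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int fcntl_setfl(int fd, int flag)   /* same shape as the helper above */
    {
        int flags = fcntl(fd, F_GETFL);
        if (flags == -1)
            return -errno;
        if (fcntl(fd, F_SETFL, flags | flag) == -1)
            return -errno;
        return 0;
    }

    int main(void)
    {
        int fds[2];
        if (pipe(fds) != 0) {
            perror("pipe");
            return 1;
        }
        /* Make the read end non-blocking without disturbing other flags. */
        int rc = fcntl_setfl(fds[0], O_NONBLOCK);
        printf("fcntl_setfl: %s\n", rc == 0 ? "ok" : "failed");
        close(fds[0]);
        close(fds[1]);
        return 0;
    }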
trunk/src/recompiler/def-helper.h
r37675 r37689 100 100 #define dh_is_64bit(t) glue(dh_is_64bit_, dh_alias(t)) 101 101 102 #define dh_is_signed_void 0 103 #define dh_is_signed_i32 0 104 #define dh_is_signed_s32 1 105 #define dh_is_signed_i64 0 106 #define dh_is_signed_s64 1 107 #define dh_is_signed_f32 0 108 #define dh_is_signed_f64 0 109 #define dh_is_signed_tl 0 110 #define dh_is_signed_int 1 111 /* ??? This is highly specific to the host cpu. There are even special 112 extension instructions that may be required, e.g. ia64's addp4. But 113 for now we don't support any 64-bit targets with 32-bit pointers. */ 114 #define dh_is_signed_ptr 0 115 #define dh_is_signed_env dh_is_signed_ptr 116 #define dh_is_signed(t) dh_is_signed_##t 117 118 #define dh_sizemask(t, n) \ 119 sizemask |= dh_is_64bit(t) << (n*2); \ 120 sizemask |= dh_is_signed(t) << (n*2+1) 121 102 122 #define dh_arg(t, n) \ 103 123 args[n - 1] = glue(GET_TCGV_, dh_alias(t))(glue(arg, n)); \ 104 sizemask |= dh_is_64bit(t) << n124 dh_sizemask(t, n) 105 125 106 126 #define dh_arg_decl(t, n) glue(TCGv_, dh_alias(t)) glue(arg, n) … … 157 177 { \ 158 178 TCGArg args[1]; \ 159 int sizemask ; \160 sizemask = dh_is_64bit(ret); \179 int sizemask = 0; \ 180 dh_sizemask(ret, 0); \ 161 181 dh_arg(t1, 1); \ 162 182 tcg_gen_helperN(HELPER(name), flags, sizemask, dh_retvar(ret), 1, args); \ … … 168 188 { \ 169 189 TCGArg args[2]; \ 170 int sizemask ; \171 sizemask = dh_is_64bit(ret); \190 int sizemask = 0; \ 191 dh_sizemask(ret, 0); \ 172 192 dh_arg(t1, 1); \ 173 193 dh_arg(t2, 2); \ … … 180 200 { \ 181 201 TCGArg args[3]; \ 182 int sizemask ; \183 sizemask = dh_is_64bit(ret); \202 int sizemask = 0; \ 203 dh_sizemask(ret, 0); \ 184 204 dh_arg(t1, 1); \ 185 205 dh_arg(t2, 2); \ … … 193 213 { \ 194 214 TCGArg args[4]; \ 195 int sizemask ; \196 sizemask = dh_is_64bit(ret); \215 int sizemask = 0; \ 216 dh_sizemask(ret, 0); \ 197 217 dh_arg(t1, 1); \ 198 218 dh_arg(t2, 2); \ -
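Note: def-helper.h switches from one bit per argument to two bits per slot in sizemask: bit 2n flags a 64-bit value, bit 2n+1 a signed value, and slot 0 describes the return value while slot n describes argument n. A worked example of the encoding, using a made-up helper signature (s64 return, i32 and s32 arguments):

    #include <stdio.h>

    #define IS_64BIT  1u   /* contribution at bit 2n   */
    #define IS_SIGNED 2u   /* contribution at bit 2n+1 */

    static unsigned slot(unsigned n, unsigned bits)
    {
        return bits << (n * 2);
    }

    int main(void)
    {
        unsigned sizemask = 0;

        sizemask |= slot(0, IS_64BIT | IS_SIGNED); /* return value: s64 */
        sizemask |= slot(1, 0);                    /* arg 1: i32 (32-bit, unsigned) */
        sizemask |= slot(2, IS_SIGNED);            /* arg 2: s32 (32-bit, signed)   */

        printf("sizemask = 0x%x\n", sizemask);     /* prints 0x23 */
        return 0;
    }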
trunk/src/recompiler/disas.h
r37675 r37689 25 25 struct elf64_sym; 26 26 27 #if defined(CONFIG_USER_ONLY) 28 typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_ulong orig_addr); 29 #else 27 30 typedef const char *(*lookup_symbol_t)(struct syminfo *s, target_phys_addr_t orig_addr); 31 #endif 28 32 29 33 struct syminfo { -
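Note: the disas.h change only swaps the address type inside the lookup_symbol_t typedef depending on CONFIG_USER_ONLY. The sketch below shows the general pattern with stand-in names (addr_t, lookup_fn, dummy_lookup are not the real interface): callers that go through the typedef build unchanged in either configuration.

    #include <stdint.h>
    #include <stdio.h>

    #ifdef CONFIG_USER_ONLY
    typedef uint32_t addr_t;            /* stand-in for target_ulong */
    #else
    typedef uint64_t addr_t;            /* stand-in for target_phys_addr_t */
    #endif

    typedef const char *(*lookup_fn)(addr_t addr);

    static const char *dummy_lookup(addr_t addr)
    {
        return addr == 0x1000 ? "start" : "?";
    }

    int main(void)
    {
        lookup_fn fn = dummy_lookup;
        printf("%s\n", fn(0x1000));
        return 0;
    }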
trunk/src/recompiler/dyngen-exec.h
r37675 r37689 73 73 74 74 #if defined(__i386__) 75 # ifndef VBOX75 # ifndef VBOX 76 76 #define AREG0 "ebp" 77 #define AREG1 "ebx" 78 #define AREG2 "esi" 79 #else /* VBOX - why are we different? */ 80 # define AREG0 "esi" 81 # define AREG1 "edi" 82 #endif /* VBOX */ 77 # else /* VBOX - why are we different? frame-pointer optimizations on mac? */ 78 # define AREG0 "esi" 79 # endif /* VBOX */ 83 80 #elif defined(__x86_64__) 84 81 #define AREG0 "r14" 85 #define AREG1 "r15"86 #define AREG2 "r12"87 82 #elif defined(_ARCH_PPC) 88 83 #define AREG0 "r27" 89 #define AREG1 "r24"90 #define AREG2 "r25"91 84 #elif defined(__arm__) 92 85 #define AREG0 "r7" 93 #define AREG1 "r4"94 #define AREG2 "r5"95 86 #elif defined(__hppa__) 96 87 #define AREG0 "r17" 97 #define AREG1 "r14"98 #define AREG2 "r15"99 88 #elif defined(__mips__) 100 89 #define AREG0 "s0" 101 #define AREG1 "s1"102 #define AREG2 "fp"103 90 #elif defined(__sparc__) 104 91 #ifdef CONFIG_SOLARIS 105 92 #define AREG0 "g2" 106 #define AREG1 "g3"107 #define AREG2 "g4"108 93 #else 109 94 #ifdef __sparc_v9__ 110 95 #define AREG0 "g5" 111 #define AREG1 "g6"112 #define AREG2 "g7"113 96 #else 114 97 #define AREG0 "g6" 115 #define AREG1 "g1"116 #define AREG2 "g2"117 98 #endif 118 99 #endif 119 100 #elif defined(__s390__) 120 101 #define AREG0 "r10" 121 #define AREG1 "r7"122 #define AREG2 "r8"123 102 #elif defined(__alpha__) 124 103 /* Note $15 is the frame pointer, so anything in op-i386.c that would 125 104 require a frame pointer, like alloca, would probably loose. */ 126 105 #define AREG0 "$15" 127 #define AREG1 "$9"128 #define AREG2 "$10"129 106 #elif defined(__mc68000) 130 107 #define AREG0 "%a5" 131 #define AREG1 "%a4"132 #define AREG2 "%d7"133 108 #elif defined(__ia64__) 134 109 #define AREG0 "r7" 135 #define AREG1 "r4"136 #define AREG2 "r5"137 110 #else 138 111 #error unsupported CPU -
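Note: dyngen-exec.h now reserves a single host register (AREG0) per architecture instead of three. That register is used for a GCC global register variable holding the CPU state pointer, so generated code and helpers agree on where to find it. The sketch below shows only that mechanism, with a stub CPUStateStub type; it is GCC-specific and assumes an x86-64 host where "r14" is the pinned register, per the table above.

    #include <stdio.h>

    typedef struct CPUStateStub {       /* stand-in for the real CPUState */
        int exception_index;
    } CPUStateStub;

    #define AREG0 "r14"
    register CPUStateStub *env asm(AREG0);  /* reserved for this translation unit */

    int main(void)
    {
        CPUStateStub cpu = { .exception_index = -1 };
        env = &cpu;                          /* helpers would read fields via env */
        printf("exception_index = %d\n", env->exception_index);
        return 0;
    }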
trunk/src/recompiler/elf.h
r37675 r37689 120 120 #define EM_S390_OLD 0xA390 121 121 122 #define EM_XILINX_MICROBLAZE 0xBAAB 122 #define EM_MICROBLAZE 189 123 #define EM_MICROBLAZE_OLD 0xBAAB 123 124 124 125 /* This is the info that is needed to parse the dynamic section of the file */ … … 244 245 #define R_386_GOTPC 10 245 246 #define R_386_NUM 11 247 /* Not a dynamic reloc, so not included in R_386_NUM. Used in TCG. */ 248 #define R_386_PC8 23 246 249 247 250 #define R_MIPS_NONE 0 -
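Note: elf.h adds R_386_PC8, flagged as used by TCG rather than the dynamic linker. The point of such a relocation is that the patcher must fit a PC-relative displacement into one signed byte and bail out if it does not fit. The standalone sketch below illustrates that range check; it assumes the displacement is taken relative to the byte after the patched one, which is an illustrative convention, not a claim about the exact TCG backend code.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Patch one byte at patch_addr so a short jump reaches target. */
    static void patch_pc8(uint8_t *patch_addr, const uint8_t *target)
    {
        intptr_t disp = target - (patch_addr + 1);   /* relative to next byte */
        if (disp != (int8_t)disp) {                  /* must fit in 8 bits */
            fprintf(stderr, "R_386_PC8 overflow: %ld\n", (long)disp);
            abort();
        }
        *patch_addr = (uint8_t)disp;
    }

    int main(void)
    {
        uint8_t buf[64] = { 0 };
        patch_pc8(&buf[10], &buf[42]);               /* forward jump inside buf */
        printf("stored displacement: %d\n", (int8_t)buf[10]);  /* 31 */
        return 0;
    }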
trunk/src/recompiler/exec-all.h
r37675 r37689 31 31 32 32 #include "qemu-common.h" 33 34 /* allow to see translation results - the slowdown should be negligible, so we leave it */35 #ifndef VBOX36 #define DEBUG_DISAS37 #endif38 39 33 #ifdef VBOX 40 34 # include <VBox/vmm/tm.h> … … 48 42 #endif /* VBOX */ 49 43 44 /* allow to see translation results - the slowdown should be negligible, so we leave it */ 45 #ifndef VBOX 46 #define DEBUG_DISAS 47 #endif /* !VBOX */ 48 49 /* Page tracking code uses ram addresses in system mode, and virtual 50 addresses in userspace mode. Define tb_page_addr_t to be an appropriate 51 type. */ 52 #if defined(CONFIG_USER_ONLY) 53 typedef abi_ulong tb_page_addr_t; 54 #else 55 typedef ram_addr_t tb_page_addr_t; 56 #endif 57 50 58 /* is_jmp field values */ 51 59 #define DISAS_NEXT 0 /* next instruction can be analyzed */ … … 58 66 /* XXX: make safe guess about sizes */ 59 67 #define MAX_OP_PER_INSTR 96 60 /* A Call op needs up to 6 + 2N parameters (N = number of arguments). */ 61 #define MAX_OPC_PARAM 10 68 69 #if HOST_LONG_BITS == 32 70 #define MAX_OPC_PARAM_PER_ARG 2 71 #else 72 #define MAX_OPC_PARAM_PER_ARG 1 73 #endif 74 #define MAX_OPC_PARAM_IARGS 4 75 #define MAX_OPC_PARAM_OARGS 1 76 #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS) 77 78 /* A Call op needs up to 4 + 2N parameters on 32-bit archs, 79 * and up to 4 + N parameters on 64-bit archs 80 * (N = number of input arguments + output arguments). */ 81 #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) 62 82 #define OPC_BUF_SIZE 640 63 83 #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR) … … 72 92 73 93 extern target_ulong gen_opc_pc[OPC_BUF_SIZE]; 74 extern target_ulong gen_opc_npc[OPC_BUF_SIZE];75 extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];76 94 extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE]; 77 95 extern uint16_t gen_opc_icount[OPC_BUF_SIZE]; 78 extern target_ulong gen_opc_jump_pc[2];79 extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];80 96 81 97 #include "qemu-log.h" … … 86 102 unsigned long searched_pc, int pc_pos, void *puc); 87 103 88 unsigned long code_gen_max_block_size(void);89 104 void cpu_gen_init(void); 90 105 int cpu_gen_code(CPUState *env, struct TranslationBlock *tb, … … 93 108 CPUState *env, unsigned long searched_pc, 94 109 void *puc); 95 int cpu_restore_state_copy(struct TranslationBlock *tb,96 CPUState *env, unsigned long searched_pc,97 void *puc);98 110 void cpu_resume_from_signal(CPUState *env1, void *puc); 99 111 void cpu_io_recompile(CPUState *env, void *retaddr); … … 104 116 void QEMU_NORETURN cpu_loop_exit(void); 105 117 int page_unprotect(target_ulong address, unsigned long pc, void *puc); 106 void tb_invalidate_phys_page_range(t arget_phys_addr_t start, target_phys_addr_t end,118 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, 107 119 int is_cpu_write_access); 108 120 void tb_invalidate_page_range(target_ulong start, target_ulong end); 109 121 void tlb_flush_page(CPUState *env, target_ulong addr); 110 122 void tlb_flush(CPUState *env, int flush_global); 111 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 112 target_phys_addr_t paddr, int prot, 113 int mmu_idx, int is_softmmu); 114 static inline int tlb_set_page(CPUState *env1, target_ulong vaddr, 115 target_phys_addr_t paddr, int prot, 116 int mmu_idx, int is_softmmu) 117 { 118 if (prot & PAGE_READ) 119 prot |= PAGE_EXEC; 120 return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu); 121 } 123 #if !defined(CONFIG_USER_ONLY) 124 void tlb_set_page(CPUState *env, 
target_ulong vaddr, 125 target_phys_addr_t paddr, int prot, 126 int mmu_idx, target_ulong size); 127 #endif 122 128 123 129 #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ … … 142 148 143 149 #ifdef VBOX /* bird: not safe in next step because of threading & cpu_interrupt. */ 144 # undef USE_DIRECT_JUMP150 # undef USE_DIRECT_JUMP 145 151 #endif /* VBOX */ 146 152 … … 154 160 #define CF_COUNT_MASK 0x7fff 155 161 #define CF_LAST_IO 0x8000 /* Last insn may be an IO access. */ 156 157 162 #ifdef VBOX 158 # define CF_RAW_MODE0x0010 /* block was generated in raw mode */163 # define CF_RAW_MODE 0x0010 /* block was generated in raw mode */ 159 164 #endif 160 165 … … 165 170 of the pointer tells the index in page_next[] */ 166 171 struct TranslationBlock *page_next[2]; 167 t arget_ulongpage_addr[2];172 tb_page_addr_t page_addr[2]; 168 173 169 174 /* the following data are used to directly call another TB from … … 171 176 uint16_t tb_next_offset[2]; /* offset of original jump target */ 172 177 #ifdef USE_DIRECT_JUMP 173 uint16_t tb_jmp_offset[ 4]; /* offset of jump instruction */178 uint16_t tb_jmp_offset[2]; /* offset of jump instruction */ 174 179 #else 175 180 unsigned long tb_next[2]; /* address of jump generated code */ … … 199 204 } 200 205 201 static inline unsigned int tb_phys_hash_func( unsigned longpc)206 static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc) 202 207 { 203 208 return pc & (CODE_GEN_PHYS_HASH_SIZE - 1); … … 207 212 void tb_free(TranslationBlock *tb); 208 213 void tb_flush(CPUState *env); 209 void tb_link_p hys(TranslationBlock *tb,210 t arget_ulong phys_pc, target_ulongphys_page2);211 void tb_phys_invalidate(TranslationBlock *tb, t arget_ulongpage_addr);214 void tb_link_page(TranslationBlock *tb, 215 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2); 216 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); 212 217 213 218 extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; 214 extern uint8_t *code_gen_ptr;215 extern int code_gen_max_blocks;216 219 217 220 #if defined(USE_DIRECT_JUMP) … … 262 265 offset = tb->tb_jmp_offset[n]; 263 266 tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr); 264 offset = tb->tb_jmp_offset[n + 2];265 if (offset != 0xffff)266 tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);267 267 } 268 268 … … 294 294 TranslationBlock *tb_find_pc(unsigned long pc_ptr); 295 295 296 #include "qemu-lock.h" 297 298 extern spinlock_t tb_lock; 299 300 extern int tb_invalidated_flag; 301 302 #if !defined(CONFIG_USER_ONLY) 303 296 304 extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4]; 297 305 extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4]; 298 306 extern void *io_mem_opaque[IO_MEM_NB_ENTRIES]; 299 307 300 #include "qemu-lock.h"301 302 extern spinlock_t tb_lock;303 304 extern int tb_invalidated_flag;305 306 #if !defined(CONFIG_USER_ONLY)307 308 308 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, 309 309 void *retaddr); … … 334 334 335 335 #if defined(CONFIG_USER_ONLY) 336 static inline t arget_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)336 static inline tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr) 337 337 { 338 338 return addr; … … 345 345 /* NOTE2: the returned address is not exactly the physical address: it 346 346 is the offset relative to phys_ram_base */ 347 static inline t arget_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)347 static inline tb_page_addr_t get_page_addr_code(CPUState 
*env1, target_ulong addr) 348 348 { 349 349 int mmu_idx, page_index, pd; … … 369 369 #endif 370 370 } 371 372 371 # if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) 373 372 return addr + env1->tlb_table[mmu_idx][page_index].addend; … … 381 380 # endif 382 381 } 383 384 /* Deterministic execution requires that IO only be performed on the last385 instruction of a TB so that interrupts take effect immediately. */386 static inline int can_do_io(CPUState *env)387 {388 if (!use_icount)389 return 1;390 391 /* If not executing code then assume we are ok. */392 if (!env->current_tb)393 return 1;394 395 return env->can_do_io != 0;396 }397 382 #endif 398 383 … … 401 386 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler); 402 387 388 #ifndef VBOX 403 389 /* vl.c */ 404 #ifndef VBOX405 390 extern int singlestep; 391 392 /* cpu-exec.c */ 393 extern volatile sig_atomic_t exit_request; 406 394 #endif /*!VBOX*/ 407 395 408 #endif 396 397 #endif -
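Note: exec-all.h rewrites the MAX_OPC_PARAM computation in terms of host word size and the 4-input/1-output helper argument limit. Restating the arithmetic (the values follow directly from the macros above):

    #include <stdio.h>

    static int max_opc_param(int host_long_bits)
    {
        int per_arg = (host_long_bits == 32) ? 2 : 1;  /* MAX_OPC_PARAM_PER_ARG */
        int args    = 4 + 1;                /* MAX_OPC_PARAM_IARGS + _OARGS */
        return 4 + per_arg * args;
    }

    int main(void)
    {
        printf("32-bit host: %d\n", max_opc_param(32));  /* 14 */
        printf("64-bit host: %d\n", max_opc_param(64));  /* 9  */
        return 0;
    }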
trunk/src/recompiler/exec.c
r37675 r37689 57 57 #ifndef VBOX 58 58 #include "hw/hw.h" 59 #endif 59 #include "hw/qdev.h" 60 #endif /* !VBOX */ 60 61 #include "osdep.h" 61 62 #include "kvm.h" 63 #include "qemu-timer.h" 62 64 #if defined(CONFIG_USER_ONLY) 63 65 #include <qemu.h> 66 #include <signal.h> 67 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) 68 #include <sys/param.h> 69 #if __FreeBSD_version >= 700104 70 #define HAVE_KINFO_GETVMMAP 71 #define sigqueue sigqueue_freebsd /* avoid redefinition */ 72 #include <sys/time.h> 73 #include <sys/proc.h> 74 #include <machine/profile.h> 75 #define _KERNEL 76 #include <sys/user.h> 77 #undef _KERNEL 78 #undef sigqueue 79 #include <libutil.h> 80 #endif 81 #endif 64 82 #endif 65 83 … … 83 101 #define SMC_BITMAP_USE_THRESHOLD 10 84 102 85 #if defined(TARGET_SPARC64)86 #define TARGET_PHYS_ADDR_SPACE_BITS 4187 #elif defined(TARGET_SPARC)88 #define TARGET_PHYS_ADDR_SPACE_BITS 3689 #elif defined(TARGET_ALPHA)90 #define TARGET_PHYS_ADDR_SPACE_BITS 4291 #define TARGET_VIRT_ADDR_SPACE_BITS 4292 #elif defined(TARGET_PPC64)93 #define TARGET_PHYS_ADDR_SPACE_BITS 4294 #elif defined(TARGET_X86_64)95 #define TARGET_PHYS_ADDR_SPACE_BITS 4296 #elif defined(TARGET_I386)97 #define TARGET_PHYS_ADDR_SPACE_BITS 3698 #else99 #define TARGET_PHYS_ADDR_SPACE_BITS 32100 #endif101 102 103 static TranslationBlock *tbs; 103 int code_gen_max_blocks;104 static int code_gen_max_blocks; 104 105 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; 105 106 static int nb_tbs; … … 126 127 uint8_t code_gen_prologue[1024] code_gen_section; 127 128 #else /* VBOX */ 128 extern uint8_t *code_gen_prologue;129 extern uint8_t *code_gen_prologue; 129 130 #endif /* VBOX */ 130 131 static uint8_t *code_gen_buffer; … … 132 133 /* threshold to flush the translated code buffer */ 133 134 static unsigned long code_gen_buffer_max_size; 134 uint8_t *code_gen_ptr; 135 136 #ifndef VBOX 135 static uint8_t *code_gen_ptr; 136 137 137 #if !defined(CONFIG_USER_ONLY) 138 # ifndef VBOX 138 139 int phys_ram_fd; 139 uint8_t *phys_ram_dirty;140 140 static int in_migration; 141 142 typedef struct RAMBlock { 143 uint8_t *host; 144 ram_addr_t offset; 145 ram_addr_t length; 146 struct RAMBlock *next; 147 } RAMBlock; 148 149 static RAMBlock *ram_blocks; 150 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug) 151 then we can no longer assume contiguous ram offsets, and external uses 152 of this variable will break. */ 153 ram_addr_t last_ram_offset; 154 #endif 155 #else /* VBOX */ 156 /* we have memory ranges (the high PC-BIOS mapping) which 157 causes some pages to fall outside the dirty map here. */ 158 RTGCPHYS phys_ram_dirty_size; 159 uint8_t *phys_ram_dirty; 160 #endif /* VBOX */ 141 # endif /* !VBOX */ 142 143 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) }; 144 #endif 161 145 162 146 CPUState *first_cpu; … … 184 168 } PageDesc; 185 169 170 /* In system mode we want L1_MAP to be based on ram offsets, 171 while in user mode we want it to be based on virtual addresses. */ 172 #if !defined(CONFIG_USER_ONLY) 173 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS 174 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS 175 #else 176 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS 177 #endif 178 #else 179 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS 180 #endif 181 182 /* Size of the L2 (and L3, etc) page tables. */ 183 #define L2_BITS 10 184 #define L2_SIZE (1 << L2_BITS) 185 186 /* The bits remaining after N lower levels of page tables. 
*/ 187 #define P_L1_BITS_REM \ 188 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) 189 #define V_L1_BITS_REM \ 190 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) 191 192 /* Size of the L1 page table. Avoid silly small sizes. */ 193 #if P_L1_BITS_REM < 4 194 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS) 195 #else 196 #define P_L1_BITS P_L1_BITS_REM 197 #endif 198 199 #if V_L1_BITS_REM < 4 200 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS) 201 #else 202 #define V_L1_BITS V_L1_BITS_REM 203 #endif 204 205 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS) 206 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) 207 208 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS) 209 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) 210 211 unsigned long qemu_real_host_page_size; 212 unsigned long qemu_host_page_bits; 213 unsigned long qemu_host_page_size; 214 unsigned long qemu_host_page_mask; 215 216 /* This is a multi-level map on the virtual address space. 217 The bottom level has pointers to PageDesc. */ 218 static void *l1_map[V_L1_SIZE]; 219 220 #if !defined(CONFIG_USER_ONLY) 186 221 typedef struct PhysPageDesc { 187 222 /* offset in host memory of the page + io_index in the low bits */ … … 190 225 } PhysPageDesc; 191 226 192 #define L2_BITS 10 193 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS) 194 /* XXX: this is a temporary hack for alpha target. 195 * In the future, this is to be replaced by a multi-level table 196 * to actually be able to handle the complete 64 bits address space. 197 */ 198 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS) 199 #else 200 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS) 201 #endif 202 #ifdef VBOX 203 #define L0_BITS (TARGET_PHYS_ADDR_SPACE_BITS - 32) 204 #endif 205 206 #ifdef VBOX 207 #define L0_SIZE (1 << L0_BITS) 208 #endif 209 #define L1_SIZE (1 << L1_BITS) 210 #define L2_SIZE (1 << L2_BITS) 211 212 unsigned long qemu_real_host_page_size; 213 unsigned long qemu_host_page_bits; 214 unsigned long qemu_host_page_size; 215 unsigned long qemu_host_page_mask; 216 217 /* XXX: for system emulation, it could just be an array */ 218 #ifndef VBOX 219 static PageDesc *l1_map[L1_SIZE]; 220 static PhysPageDesc **l1_phys_map; 221 #else 222 static unsigned l0_map_max_used = 0; 223 static PageDesc **l0_map[L0_SIZE]; 224 static void **l0_phys_map[L0_SIZE]; 225 #endif 226 227 #if !defined(CONFIG_USER_ONLY) 227 /* This is a multi-level map on the physical address space. 228 The bottom level has pointers to PhysPageDesc. 
*/ 229 static void *l1_phys_map[P_L1_SIZE]; 230 228 231 static void io_mem_init(void); 229 232 … … 238 241 #ifndef VBOX 239 242 /* log support */ 243 #ifdef WIN32 244 static const char *logfilename = "qemu.log"; 245 #else 240 246 static const char *logfilename = "/tmp/qemu.log"; 247 #endif 241 248 #endif /* !VBOX */ 242 249 FILE *logfile; … … 244 251 #ifndef VBOX 245 252 static int log_append = 0; 246 #endif 253 #endif /* !VBOX */ 247 254 248 255 /* statistics */ 249 256 #ifndef VBOX 257 #if !defined(CONFIG_USER_ONLY) 250 258 static int tlb_flush_count; 259 #endif 251 260 static int tb_flush_count; 252 261 static int tb_phys_invalidate_count; … … 256 265 uint32_t tb_phys_invalidate_count; 257 266 #endif /* VBOX */ 258 259 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)260 typedef struct subpage_t {261 target_phys_addr_t base;262 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];263 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];264 void *opaque[TARGET_PAGE_SIZE][2][4];265 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];266 } subpage_t;267 267 268 268 #ifndef VBOX … … 325 325 qemu_host_page_size = TARGET_PAGE_SIZE; 326 326 qemu_host_page_bits = 0; 327 #ifndef VBOX 328 while ((1 << qemu_host_page_bits) < qemu_host_page_size) 329 #else 330 while ((1 << qemu_host_page_bits) < (int)qemu_host_page_size) 331 #endif 327 while ((1 << qemu_host_page_bits) < VBOX_ONLY((int))qemu_host_page_size) 332 328 qemu_host_page_bits++; 333 329 qemu_host_page_mask = ~(qemu_host_page_size - 1); 334 #ifndef VBOX 335 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *)); 336 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *)); 337 #endif 338 339 #ifdef VBOX 340 /* We use other means to set reserved bit on our pages */ 341 #else /* !VBOX */ 342 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY) 330 331 #ifndef VBOX /* We use other means to set reserved bit on our pages */ 332 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) 343 333 { 344 long long startaddr, endaddr; 334 #ifdef HAVE_KINFO_GETVMMAP 335 struct kinfo_vmentry *freep; 336 int i, cnt; 337 338 freep = kinfo_getvmmap(getpid(), &cnt); 339 if (freep) { 340 mmap_lock(); 341 for (i = 0; i < cnt; i++) { 342 unsigned long startaddr, endaddr; 343 344 startaddr = freep[i].kve_start; 345 endaddr = freep[i].kve_end; 346 if (h2g_valid(startaddr)) { 347 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 348 349 if (h2g_valid(endaddr)) { 350 endaddr = h2g(endaddr); 351 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 352 } else { 353 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS 354 endaddr = ~0ul; 355 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 356 #endif 357 } 358 } 359 } 360 free(freep); 361 mmap_unlock(); 362 } 363 #else 345 364 FILE *f; 346 int n; 347 348 mmap_lock(); 365 349 366 last_brk = (unsigned long)sbrk(0); 350 f = fopen("/proc/self/maps", "r"); 367 368 f = fopen("/compat/linux/proc/self/maps", "r"); 351 369 if (f) { 370 mmap_lock(); 371 352 372 do { 353 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr); 354 if (n == 2) { 355 startaddr = MIN(startaddr, 356 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1); 357 endaddr = MIN(endaddr, 358 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1); 359 page_set_flags(startaddr & TARGET_PAGE_MASK, 360 TARGET_PAGE_ALIGN(endaddr), 361 PAGE_RESERVED); 373 unsigned long startaddr, endaddr; 374 int n; 375 376 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); 377 378 if (n == 2 && h2g_valid(startaddr)) { 379 startaddr = h2g(startaddr) & TARGET_PAGE_MASK; 380 381 if (h2g_valid(endaddr)) { 382 
endaddr = h2g(endaddr); 383 } else { 384 endaddr = ~0ul; 385 } 386 page_set_flags(startaddr, endaddr, PAGE_RESERVED); 362 387 } 363 388 } while (!feof(f)); 389 364 390 fclose(f); 365 } 366 mmap_unlock(); 391 mmap_unlock(); 392 } 393 #endif 367 394 } 368 395 #endif … … 370 397 } 371 398 372 static inline PageDesc **page_l1_map(target_ulong index) 373 { 374 #ifndef VBOX 375 #if TARGET_LONG_BITS > 32 376 /* Host memory outside guest VM. For 32-bit targets we have already 377 excluded high addresses. */ 378 if (index > ((target_ulong)L2_SIZE * L1_SIZE)) 379 return NULL; 380 #endif 381 return &l1_map[index >> L2_BITS]; 382 #else /* VBOX */ 383 PageDesc **l1_map; 384 AssertMsgReturn(index < (target_ulong)L2_SIZE * L1_SIZE * L0_SIZE, 385 ("index=%RGp >= %RGp; L1_SIZE=%#x L2_SIZE=%#x L0_SIZE=%#x\n", 386 (RTGCPHYS)index, (RTGCPHYS)L2_SIZE * L1_SIZE, L1_SIZE, L2_SIZE, L0_SIZE), 387 NULL); 388 l1_map = l0_map[index >> (L1_BITS + L2_BITS)]; 389 if (RT_UNLIKELY(!l1_map)) 390 { 391 unsigned i0 = index >> (L1_BITS + L2_BITS); 392 l0_map[i0] = l1_map = qemu_mallocz(sizeof(PageDesc *) * L1_SIZE); 393 if (RT_UNLIKELY(!l1_map)) 399 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) 400 { 401 PageDesc *pd; 402 void **lp; 403 int i; 404 405 #if defined(CONFIG_USER_ONLY) 406 /* We can't use qemu_malloc because it may recurse into a locked mutex. */ 407 # define ALLOC(P, SIZE) \ 408 do { \ 409 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \ 410 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \ 411 } while (0) 412 #else 413 # define ALLOC(P, SIZE) \ 414 do { P = qemu_mallocz(SIZE); } while (0) 415 #endif 416 417 /* Level 1. Always allocated. */ 418 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); 419 420 /* Level 2..N-1. */ 421 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) { 422 void **p = *lp; 423 424 if (p == NULL) { 425 if (!alloc) { 426 return NULL; 427 } 428 ALLOC(p, sizeof(void *) * L2_SIZE); 429 *lp = p; 430 } 431 432 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); 433 } 434 435 pd = *lp; 436 if (pd == NULL) { 437 if (!alloc) { 394 438 return NULL; 395 if (i0 >= l0_map_max_used) 396 l0_map_max_used = i0 + 1; 397 } 398 return &l1_map[(index >> L2_BITS) & (L1_SIZE - 1)]; 399 #endif /* VBOX */ 400 } 401 402 static inline PageDesc *page_find_alloc(target_ulong index) 403 { 404 PageDesc **lp, *p; 405 lp = page_l1_map(index); 406 if (!lp) 407 return NULL; 408 409 p = *lp; 410 if (!p) { 411 /* allocate if not found */ 412 #if defined(CONFIG_USER_ONLY) 413 size_t len = sizeof(PageDesc) * L2_SIZE; 414 /* Don't use qemu_malloc because it may recurse. 
*/ 415 p = mmap(NULL, len, PROT_READ | PROT_WRITE, 416 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 417 *lp = p; 418 if (h2g_valid(p)) { 419 unsigned long addr = h2g(p); 420 page_set_flags(addr & TARGET_PAGE_MASK, 421 TARGET_PAGE_ALIGN(addr + len), 422 PAGE_RESERVED); 423 } 424 #else 425 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE); 426 *lp = p; 427 #endif 428 } 429 return p + (index & (L2_SIZE - 1)); 430 } 431 432 static inline PageDesc *page_find(target_ulong index) 433 { 434 PageDesc **lp, *p; 435 lp = page_l1_map(index); 436 if (!lp) 437 return NULL; 438 439 p = *lp; 440 if (!p) { 441 return NULL; 442 } 443 return p + (index & (L2_SIZE - 1)); 444 } 445 439 } 440 ALLOC(pd, sizeof(PageDesc) * L2_SIZE); 441 *lp = pd; 442 } 443 444 #undef ALLOC 445 446 return pd + (index & (L2_SIZE - 1)); 447 } 448 449 static inline PageDesc *page_find(tb_page_addr_t index) 450 { 451 return page_find_alloc(index, 0); 452 } 453 454 #if !defined(CONFIG_USER_ONLY) 446 455 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) 447 456 { 448 void **lp, **p;449 457 PhysPageDesc *pd; 450 451 #ifndef VBOX 452 p = (void **)l1_phys_map; 453 #if TARGET_PHYS_ADDR_SPACE_BITS > 32 454 455 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS) 456 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS 457 #endif 458 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)); 459 p = *lp; 460 if (!p) { 461 /* allocate if not found */ 462 if (!alloc) 458 void **lp; 459 int i; 460 461 /* Level 1. Always allocated. */ 462 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1)); 463 464 /* Level 2..N-1. */ 465 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) { 466 void **p = *lp; 467 if (p == NULL) { 468 if (!alloc) { 469 return NULL; 470 } 471 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE); 472 } 473 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); 474 } 475 476 pd = *lp; 477 if (pd == NULL) { 478 int i; 479 480 if (!alloc) { 463 481 return NULL; 464 p = qemu_vmalloc(sizeof(void *) * L1_SIZE); 465 memset(p, 0, sizeof(void *) * L1_SIZE); 466 *lp = p; 467 } 468 #endif 469 #else /* VBOX */ 470 /* level 0 lookup and lazy allocation of level 1 map. */ 471 if (RT_UNLIKELY(index >= (target_phys_addr_t)L2_SIZE * L1_SIZE * L0_SIZE)) 472 return NULL; 473 p = l0_phys_map[index >> (L1_BITS + L2_BITS)]; 474 if (RT_UNLIKELY(!p)) { 475 if (!alloc) 476 return NULL; 477 p = qemu_vmalloc(sizeof(void **) * L1_SIZE); 478 memset(p, 0, sizeof(void **) * L1_SIZE); 479 l0_phys_map[index >> (L1_BITS + L2_BITS)] = p; 480 } 481 482 /* level 1 lookup and lazy allocation of level 2 map. 
*/ 483 #endif /* VBOX */ 484 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1)); 485 pd = *lp; 486 if (!pd) { 487 int i; 488 /* allocate if not found */ 489 if (!alloc) 490 return NULL; 491 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); 492 *lp = pd; 482 } 483 484 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE); 485 493 486 for (i = 0; i < L2_SIZE; i++) { 494 pd[i].phys_offset = IO_MEM_UNASSIGNED; 495 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS; 496 } 497 } 498 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1)); 487 pd[i].phys_offset = IO_MEM_UNASSIGNED; 488 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS; 489 } 490 } 491 492 return pd + (index & (L2_SIZE - 1)); 499 493 } 500 494 … … 504 498 } 505 499 506 #if !defined(CONFIG_USER_ONLY)507 500 static void tlb_protect_code(ram_addr_t ram_addr); 508 501 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, … … 530 523 531 524 #ifdef USE_STATIC_CODE_GEN_BUFFER 532 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]; 525 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] 526 __attribute__((aligned (CODE_GEN_ALIGN))); 533 527 #endif 534 528 … … 594 588 if (code_gen_buffer_size > 16 * 1024 * 1024) 595 589 code_gen_buffer_size = 16 * 1024 * 1024; 590 #elif defined(__s390x__) 591 /* Map the buffer so that we can use direct calls and branches. */ 592 /* We have a +- 4GB range on the branches; leave some slop. */ 593 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) { 594 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024; 595 } 596 start = (void *)0x90000000UL; 596 597 #endif 597 598 code_gen_buffer = mmap(start, code_gen_buffer_size, … … 637 638 #endif 638 639 code_gen_buffer_max_size = code_gen_buffer_size - 639 code_gen_max_block_size();640 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE); 640 641 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; 641 642 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock)); … … 654 655 io_mem_init(); 655 656 #endif 657 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) 658 /* There's no guest base to take into account, so go ahead and 659 initialize the prologue now. */ 660 tcg_prologue_init(&tcg_ctx); 661 #endif 656 662 } 657 663 658 664 #ifndef VBOX 659 665 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 660 661 static void cpu_common_pre_save(void *opaque)662 {663 CPUState *env = opaque;664 665 cpu_synchronize_state(env);666 }667 668 static int cpu_common_pre_load(void *opaque)669 {670 CPUState *env = opaque;671 672 cpu_synchronize_state(env);673 return 0;674 }675 666 676 667 static int cpu_common_post_load(void *opaque, int version_id) … … 691 682 .minimum_version_id = 1, 692 683 .minimum_version_id_old = 1, 693 .pre_save = cpu_common_pre_save,694 .pre_load = cpu_common_pre_load,695 684 .post_load = cpu_common_post_load, 696 685 .fields = (VMStateField []) { … … 742 731 #endif 743 732 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) 744 vmstate_register( cpu_index, &vmstate_cpu_common, env);745 register_savevm( "cpu", cpu_index, CPU_SAVE_VERSION,733 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env); 734 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION, 746 735 cpu_save, cpu_load, env); 747 736 #endif … … 758 747 } 759 748 760 /* set to NULL all the 'first_tb' fields in all PageDescs */ 749 /* Set to NULL all the 'first_tb' fields in all PageDescs. 
*/ 750 751 static void page_flush_tb_1 (int level, void **lp) 752 { 753 int i; 754 755 if (*lp == NULL) { 756 return; 757 } 758 if (level == 0) { 759 PageDesc *pd = *lp; 760 for (i = 0; i < L2_SIZE; ++i) { 761 pd[i].first_tb = NULL; 762 invalidate_page_bitmap(pd + i); 763 } 764 } else { 765 void **pp = *lp; 766 for (i = 0; i < L2_SIZE; ++i) { 767 page_flush_tb_1 (level - 1, pp + i); 768 } 769 } 770 } 771 761 772 static void page_flush_tb(void) 762 773 { 763 int i, j; 764 PageDesc *p; 765 #ifdef VBOX 766 int k; 767 #endif 768 769 #ifdef VBOX 770 k = l0_map_max_used; 771 while (k-- > 0) { 772 PageDesc **l1_map = l0_map[k]; 773 if (l1_map) { 774 #endif 775 for(i = 0; i < L1_SIZE; i++) { 776 p = l1_map[i]; 777 if (p) { 778 for(j = 0; j < L2_SIZE; j++) { 779 p->first_tb = NULL; 780 invalidate_page_bitmap(p); 781 p++; 782 } 783 } 784 } 785 #ifdef VBOX 786 } 787 } 788 #endif 774 int i; 775 for (i = 0; i < V_L1_SIZE; i++) { 776 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i); 777 } 789 778 } 790 779 … … 930 919 } 931 920 932 void tb_phys_invalidate(TranslationBlock *tb, t arget_ulongpage_addr)921 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) 933 922 { 934 923 CPUState *env; 935 924 PageDesc *p; 936 925 unsigned int h, n1; 937 t arget_phys_addr_t phys_pc;926 tb_page_addr_t phys_pc; 938 927 TranslationBlock *tb1, *tb2; 939 928 … … 1094 1083 TranslationBlock *tb; 1095 1084 uint8_t *tc_ptr; 1096 target_ulong phys_pc, phys_page2, virt_page2; 1085 tb_page_addr_t phys_pc, phys_page2; 1086 target_ulong virt_page2; 1097 1087 int code_gen_size; 1098 1088 1099 phys_pc = get_p hys_addr_code(env, pc);1089 phys_pc = get_page_addr_code(env, pc); 1100 1090 tb = tb_alloc(pc); 1101 1091 if (!tb) { … … 1119 1109 phys_page2 = -1; 1120 1110 if ((pc & TARGET_PAGE_MASK) != virt_page2) { 1121 phys_page2 = get_p hys_addr_code(env, virt_page2);1122 } 1123 tb_link_p hys(tb, phys_pc, phys_page2);1111 phys_page2 = get_page_addr_code(env, virt_page2); 1112 } 1113 tb_link_page(tb, phys_pc, phys_page2); 1124 1114 return tb; 1125 1115 } … … 1130 1120 from a real cpu write access: the virtual CPU will exit the current 1131 1121 TB if code is modified inside this TB. 
*/ 1132 void tb_invalidate_phys_page_range(t arget_phys_addr_t start, target_phys_addr_t end,1122 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, 1133 1123 int is_cpu_write_access) 1134 1124 { 1135 1125 TranslationBlock *tb, *tb_next, *saved_tb; 1136 1126 CPUState *env = cpu_single_env; 1137 t arget_ulongtb_start, tb_end;1127 tb_page_addr_t tb_start, tb_end; 1138 1128 PageDesc *p; 1139 1129 int n; … … 1237 1227 1238 1228 /* len must be <= 8 and start must be a multiple of len */ 1239 static inline void tb_invalidate_phys_page_fast(t arget_phys_addr_t start, int len)1229 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) 1240 1230 { 1241 1231 PageDesc *p; … … 1264 1254 1265 1255 #if !defined(CONFIG_SOFTMMU) 1266 static void tb_invalidate_phys_page(t arget_phys_addr_t addr,1256 static void tb_invalidate_phys_page(tb_page_addr_t addr, 1267 1257 unsigned long pc, void *puc) 1268 1258 { … … 1326 1316 /* add the tb in the target page and protect it if necessary */ 1327 1317 static inline void tb_alloc_page(TranslationBlock *tb, 1328 unsigned int n, t arget_ulongpage_addr)1318 unsigned int n, tb_page_addr_t page_addr) 1329 1319 { 1330 1320 PageDesc *p; … … 1332 1322 1333 1323 tb->page_addr[n] = page_addr; 1334 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS );1324 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); 1335 1325 tb->page_next[n] = p->first_tb; 1336 1326 last_first_tb = p->first_tb; … … 1358 1348 prot |= p2->flags; 1359 1349 p2->flags &= ~PAGE_WRITE; 1360 page_get_flags(addr);1361 1350 } 1362 1351 mprotect(g2h(page_addr), qemu_host_page_size, … … 1386 1375 1387 1376 if (nb_tbs >= code_gen_max_blocks || 1388 #ifndef VBOX 1389 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) 1390 #else 1391 (code_gen_ptr - code_gen_buffer) >= (int)code_gen_buffer_max_size) 1392 #endif 1377 (code_gen_ptr - code_gen_buffer) >= VBOX_ONLY((unsigned long))code_gen_buffer_max_size) 1393 1378 return NULL; 1394 1379 tb = &tbs[nb_tbs++]; … … 1411 1396 /* add a new TB and link it to the physical page tables. phys_page2 is 1412 1397 (-1) to indicate that only one page contains the TB. */ 1413 void tb_link_p hys(TranslationBlock *tb,1414 t arget_ulong phys_pc, target_ulongphys_page2)1398 void tb_link_page(TranslationBlock *tb, 1399 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2) 1415 1400 { 1416 1401 unsigned int h; … … 1528 1513 1529 1514 #if defined(TARGET_HAS_ICE) 1515 #if defined(CONFIG_USER_ONLY) 1516 static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1517 { 1518 tb_invalidate_phys_page_range(pc, pc + 1, 0); 1519 } 1520 #else 1530 1521 static void breakpoint_invalidate(CPUState *env, target_ulong pc) 1531 1522 { … … 1546 1537 } 1547 1538 #endif 1548 1539 #endif /* TARGET_HAS_ICE */ 1540 1541 #if defined(CONFIG_USER_ONLY) 1542 void cpu_watchpoint_remove_all(CPUState *env, int mask) 1543 1544 { 1545 } 1546 1547 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, 1548 int flags, CPUWatchpoint **watchpoint) 1549 { 1550 return -ENOSYS; 1551 } 1552 #else 1549 1553 /* Add a watchpoint. */ 1550 1554 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len, … … 1624 1628 } 1625 1629 } 1630 #endif 1626 1631 1627 1632 /* Add a breakpoint. */ … … 1762 1767 static void cpu_unlink_tb(CPUState *env) 1763 1768 { 1764 #if defined(CONFIG_USE_NPTL)1765 1769 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the 1766 1770 problem and hope the cpu will stop of its own accord. 
For userspace 1767 1771 emulation this often isn't actually as bad as it sounds. Often 1768 1772 signals are used primarily to interrupt blocking syscalls. */ 1769 #else1770 1773 TranslationBlock *tb; 1771 1774 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED; 1772 1775 1776 spin_lock(&interrupt_lock); 1773 1777 tb = env->current_tb; 1774 1778 /* if the cpu is currently executing code, we must unlink it and 1775 1779 all the potentially executing TB */ 1776 if (tb && !testandset(&interrupt_lock)) {1780 if (tb) { 1777 1781 env->current_tb = NULL; 1778 1782 tb_reset_jump_recursive(tb); 1779 resetlock(&interrupt_lock); 1780 } 1781 #endif 1783 } 1784 spin_unlock(&interrupt_lock); 1782 1785 } 1783 1786 … … 1872 1875 { 0, NULL, NULL }, 1873 1876 }; 1877 1878 #ifndef CONFIG_USER_ONLY 1879 static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list 1880 = QLIST_HEAD_INITIALIZER(memory_client_list); 1881 1882 static void cpu_notify_set_memory(target_phys_addr_t start_addr, 1883 ram_addr_t size, 1884 ram_addr_t phys_offset) 1885 { 1886 CPUPhysMemoryClient *client; 1887 QLIST_FOREACH(client, &memory_client_list, list) { 1888 client->set_memory(client, start_addr, size, phys_offset); 1889 } 1890 } 1891 1892 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start, 1893 target_phys_addr_t end) 1894 { 1895 CPUPhysMemoryClient *client; 1896 QLIST_FOREACH(client, &memory_client_list, list) { 1897 int r = client->sync_dirty_bitmap(client, start, end); 1898 if (r < 0) 1899 return r; 1900 } 1901 return 0; 1902 } 1903 1904 static int cpu_notify_migration_log(int enable) 1905 { 1906 CPUPhysMemoryClient *client; 1907 QLIST_FOREACH(client, &memory_client_list, list) { 1908 int r = client->migration_log(client, enable); 1909 if (r < 0) 1910 return r; 1911 } 1912 return 0; 1913 } 1914 1915 static void phys_page_for_each_1(CPUPhysMemoryClient *client, 1916 int level, void **lp) 1917 { 1918 int i; 1919 1920 if (*lp == NULL) { 1921 return; 1922 } 1923 if (level == 0) { 1924 PhysPageDesc *pd = *lp; 1925 for (i = 0; i < L2_SIZE; ++i) { 1926 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) { 1927 client->set_memory(client, pd[i].region_offset, 1928 TARGET_PAGE_SIZE, pd[i].phys_offset); 1929 } 1930 } 1931 } else { 1932 void **pp = *lp; 1933 for (i = 0; i < L2_SIZE; ++i) { 1934 phys_page_for_each_1(client, level - 1, pp + i); 1935 } 1936 } 1937 } 1938 1939 static void phys_page_for_each(CPUPhysMemoryClient *client) 1940 { 1941 int i; 1942 for (i = 0; i < P_L1_SIZE; ++i) { 1943 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1, 1944 l1_phys_map + 1); 1945 } 1946 } 1947 1948 void cpu_register_phys_memory_client(CPUPhysMemoryClient *client) 1949 { 1950 QLIST_INSERT_HEAD(&memory_client_list, client, list); 1951 phys_page_for_each(client); 1952 } 1953 1954 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client) 1955 { 1956 QLIST_REMOVE(client, list); 1957 } 1958 #endif 1874 1959 1875 1960 static int cmp1(const char *s1, int n, const char *s2) … … 1912 1997 return mask; 1913 1998 } 1914 #endif /* !VBOX */ 1915 1916 #ifndef VBOX /* VBOX: we have our own routine. */ 1999 1917 2000 void cpu_abort(CPUState *env, const char *fmt, ...) 
1918 2001 { … … 1944 2027 va_end(ap2); 1945 2028 va_end(ap); 2029 #if defined(CONFIG_USER_ONLY) 2030 { 2031 struct sigaction act; 2032 sigfillset(&act.sa_mask); 2033 act.sa_handler = SIG_DFL; 2034 sigaction(SIGABRT, &act, NULL); 2035 } 2036 #endif 1946 2037 abort(); 1947 2038 } 1948 #endif /* !VBOX */ 1949 1950 #ifndef VBOX /* not needed */ 2039 1951 2040 CPUState *cpu_copy(CPUState *env) 1952 2041 { … … 1982 2071 return new_env; 1983 2072 } 2073 1984 2074 #endif /* !VBOX */ 1985 1986 2075 #if !defined(CONFIG_USER_ONLY) 1987 2076 … … 1999 2088 memset (&env->tb_jmp_cache[i], 0, 2000 2089 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); 2001 2002 2090 #ifdef VBOX 2091 2003 2092 /* inform raw mode about TLB page flush */ 2004 2093 remR3FlushPage(env, addr); … … 2035 2124 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); 2036 2125 2126 env->tlb_flush_addr = -1; 2127 env->tlb_flush_mask = 0; 2128 tlb_flush_count++; 2037 2129 #ifdef VBOX 2130 2038 2131 /* inform raw mode about TLB flush */ 2039 2132 remR3FlushTLB(env, flush_global); 2040 #endif 2041 tlb_flush_count++; 2133 #endif /* VBOX */ 2042 2134 } 2043 2135 … … 2062 2154 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr); 2063 2155 #endif 2156 /* Check if we need to flush due to large pages. */ 2157 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) { 2158 #if defined(DEBUG_TLB) 2159 printf("tlb_flush_page: forced full flush (" 2160 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", 2161 env->tlb_flush_addr, env->tlb_flush_mask); 2162 #endif 2163 tlb_flush(env, 1); 2164 return; 2165 } 2064 2166 /* must reset current TB so that interrupts cannot modify the 2065 2167 links while we are modifying them */ … … 2084 2186 /** @todo Retest this? This function has changed... */ 2085 2187 remR3ProtectCode(cpu_single_env, ram_addr); 2086 #endif 2188 #endif /* VBOX */ 2087 2189 } 2088 2190 … … 2092 2194 target_ulong vaddr) 2093 2195 { 2094 #ifdef VBOX 2095 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2096 #endif 2097 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG; 2196 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG); 2098 2197 } 2099 2198 … … 2102 2201 { 2103 2202 unsigned long addr; 2104 2105 2203 #ifdef VBOX 2204 2106 2205 if (start & 3) 2107 2206 return; 2108 #endif 2207 #endif /* VBOX */ 2109 2208 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) { 2110 2209 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend; … … 2121 2220 CPUState *env; 2122 2221 unsigned long length, start1; 2123 int i, mask, len; 2124 uint8_t *p; 2222 int i; 2125 2223 2126 2224 start &= TARGET_PAGE_MASK; … … 2130 2228 if (length == 0) 2131 2229 return; 2132 len = length >> TARGET_PAGE_BITS; 2133 mask = ~dirty_flags; 2134 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS); 2135 #ifdef VBOX 2136 if (RT_LIKELY((start >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2137 #endif 2138 for(i = 0; i < len; i++) 2139 p[i] &= mask; 2230 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags); 2140 2231 2141 2232 /* we modify the TLB cache so that the dirty bit will be set again … … 2166 2257 2167 2258 #ifndef VBOX 2259 2168 2260 int cpu_physical_memory_set_dirty_tracking(int enable) 2169 2261 { 2262 int ret = 0; 2170 2263 in_migration = enable; 2171 if (kvm_enabled()) { 2172 return kvm_set_migration_log(enable); 2173 } 2174 return 0; 2264 ret = cpu_notify_migration_log(!!enable); 2265 return ret; 2175 2266 } 2176 2267 … … 2179 2270 return in_migration; 2180 2271 } 2272 2181 2273 #endif 
/* !VBOX */ 2182 2274 … … 2185 2277 { 2186 2278 #ifndef VBOX 2187 int ret = 0; 2188 2189 if (kvm_enabled()) 2190 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr); 2279 int ret; 2280 2281 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr); 2191 2282 return ret; 2192 #else 2283 #else /* VBOX */ 2193 2284 return 0; 2194 #endif 2285 #endif /* VBOX */ 2195 2286 } 2196 2287 … … 2255 2346 } 2256 2347 2257 /* add a new TLB entry. At most one entry for a given virtual address 2258 is permitted. Return 0 if OK or 2 if the page could not be mapped 2259 (can only happen in non SOFTMMU mode for I/O pages or pages 2260 conflicting with the host address space). */ 2261 int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 2262 target_phys_addr_t paddr, int prot, 2263 int mmu_idx, int is_softmmu) 2348 /* Our TLB does not support large pages, so remember the area covered by 2349 large pages and trigger a full TLB flush if these are invalidated. */ 2350 static void tlb_add_large_page(CPUState *env, target_ulong vaddr, 2351 target_ulong size) 2352 { 2353 target_ulong mask = ~(size - 1); 2354 2355 if (env->tlb_flush_addr == (target_ulong)-1) { 2356 env->tlb_flush_addr = vaddr & mask; 2357 env->tlb_flush_mask = mask; 2358 return; 2359 } 2360 /* Extend the existing region to include the new page. 2361 This is a compromise between unnecessary flushes and the cost 2362 of maintaining a full variable size TLB. */ 2363 mask &= env->tlb_flush_mask; 2364 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) { 2365 mask <<= 1; 2366 } 2367 env->tlb_flush_addr &= mask; 2368 env->tlb_flush_mask = mask; 2369 } 2370 2371 /* Add a new TLB entry. At most one entry for a given virtual address 2372 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the 2373 supplied size is only used by tlb_flush_page. */ 2374 void tlb_set_page(CPUState *env, target_ulong vaddr, 2375 target_phys_addr_t paddr, int prot, 2376 int mmu_idx, target_ulong size) 2264 2377 { 2265 2378 PhysPageDesc *p; … … 2268 2381 target_ulong address; 2269 2382 target_ulong code_address; 2270 target_phys_addr_t addend; 2271 int ret; 2383 unsigned long addend; 2272 2384 CPUTLBEntry *te; 2273 2385 CPUWatchpoint *wp; … … 2277 2389 #endif 2278 2390 2391 assert(size >= TARGET_PAGE_SIZE); 2392 if (size != TARGET_PAGE_SIZE) { 2393 tlb_add_large_page(env, vaddr, size); 2394 } 2279 2395 p = phys_page_find(paddr >> TARGET_PAGE_BITS); 2280 2396 if (!p) { … … 2288 2404 #endif 2289 2405 2290 ret = 0;2291 2406 address = vaddr; 2292 2407 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) { … … 2357 2472 QTAILQ_FOREACH(wp, &env->watchpoints, entry) { 2358 2473 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) { 2359 iotlb = io_mem_watch + paddr; 2360 /* TODO: The memory case can be optimized by not trapping 2361 reads of pages with a write breakpoint. */ 2362 address |= TLB_MMIO; 2474 /* Avoid trapping reads of pages with a write breakpoint. */ 2475 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) { 2476 iotlb = io_mem_watch + paddr; 2477 address |= TLB_MMIO; 2478 break; 2479 } 2363 2480 } 2364 2481 } … … 2409 2526 remR3FlushPage(env, vaddr); 2410 2527 #endif 2411 return ret;2412 2528 } 2413 2529 … … 2421 2537 { 2422 2538 } 2423 2424 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,2425 target_phys_addr_t paddr, int prot,2426 int mmu_idx, int is_softmmu)2427 {2428 return 0;2429 }2430 2431 #ifndef VBOX2432 2539 2433 2540 /* … … 2435 2542 * and calls callback function 'fn' for each region. 
2436 2543 */ 2437 int walk_memory_regions(void *priv, 2438 int (*fn)(void *, unsigned long, unsigned long, unsigned long)) 2439 { 2440 unsigned long start, end; 2441 PageDesc *p = NULL; 2442 int i, j, prot, prot1; 2443 int rc = 0; 2444 2445 start = end = -1; 2446 prot = 0; 2447 2448 for (i = 0; i <= L1_SIZE; i++) { 2449 p = (i < L1_SIZE) ? l1_map[i] : NULL; 2450 for (j = 0; j < L2_SIZE; j++) { 2451 prot1 = (p == NULL) ? 0 : p[j].flags; 2452 /* 2453 * "region" is one continuous chunk of memory 2454 * that has same protection flags set. 2455 */ 2456 if (prot1 != prot) { 2457 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS); 2458 if (start != -1) { 2459 rc = (*fn)(priv, start, end, prot); 2460 /* callback can stop iteration by returning != 0 */ 2461 if (rc != 0) 2462 return (rc); 2544 2545 struct walk_memory_regions_data 2546 { 2547 walk_memory_regions_fn fn; 2548 void *priv; 2549 unsigned long start; 2550 int prot; 2551 }; 2552 2553 static int walk_memory_regions_end(struct walk_memory_regions_data *data, 2554 abi_ulong end, int new_prot) 2555 { 2556 if (data->start != -1ul) { 2557 int rc = data->fn(data->priv, data->start, end, data->prot); 2558 if (rc != 0) { 2559 return rc; 2560 } 2561 } 2562 2563 data->start = (new_prot ? end : -1ul); 2564 data->prot = new_prot; 2565 2566 return 0; 2567 } 2568 2569 static int walk_memory_regions_1(struct walk_memory_regions_data *data, 2570 abi_ulong base, int level, void **lp) 2571 { 2572 abi_ulong pa; 2573 int i, rc; 2574 2575 if (*lp == NULL) { 2576 return walk_memory_regions_end(data, base, 0); 2577 } 2578 2579 if (level == 0) { 2580 PageDesc *pd = *lp; 2581 for (i = 0; i < L2_SIZE; ++i) { 2582 int prot = pd[i].flags; 2583 2584 pa = base | (i << TARGET_PAGE_BITS); 2585 if (prot != data->prot) { 2586 rc = walk_memory_regions_end(data, pa, prot); 2587 if (rc != 0) { 2588 return rc; 2463 2589 } 2464 if (prot1 != 0)2465 start = end;2466 else2467 start = -1;2468 prot = prot1;2469 2590 } 2470 if (p == NULL) 2471 break; 2472 } 2473 } 2474 return (rc); 2475 } 2476 2477 static int dump_region(void *priv, unsigned long start, 2478 unsigned long end, unsigned long prot) 2591 } 2592 } else { 2593 void **pp = *lp; 2594 for (i = 0; i < L2_SIZE; ++i) { 2595 pa = base | ((abi_ulong)i << 2596 (TARGET_PAGE_BITS + L2_BITS * level)); 2597 rc = walk_memory_regions_1(data, pa, level - 1, pp + i); 2598 if (rc != 0) { 2599 return rc; 2600 } 2601 } 2602 } 2603 2604 return 0; 2605 } 2606 2607 int walk_memory_regions(void *priv, walk_memory_regions_fn fn) 2608 { 2609 struct walk_memory_regions_data data; 2610 unsigned long i; 2611 2612 data.fn = fn; 2613 data.priv = priv; 2614 data.start = -1ul; 2615 data.prot = 0; 2616 2617 for (i = 0; i < V_L1_SIZE; i++) { 2618 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT, 2619 V_L1_SHIFT / L2_BITS - 1, l1_map + i); 2620 if (rc != 0) { 2621 return rc; 2622 } 2623 } 2624 2625 return walk_memory_regions_end(&data, 0, 0); 2626 } 2627 2628 static int dump_region(void *priv, abi_ulong start, 2629 abi_ulong end, unsigned long prot) 2479 2630 { 2480 2631 FILE *f = (FILE *)priv; 2481 2632 2482 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n", 2633 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx 2634 " "TARGET_ABI_FMT_lx" %c%c%c\n", 2483 2635 start, end, end - start, 2484 2636 ((prot & PAGE_READ) ? 
'r' : '-'), … … 2497 2649 } 2498 2650 2499 #endif /* !VBOX */2500 2501 2651 int page_get_flags(target_ulong address) 2502 2652 { … … 2509 2659 } 2510 2660 2511 /* modify the flags of a page and invalidate the code if2512 necessary. The flag PAGE_WRITE_ORG is positioned automatically2513 depending on PAGE_WRITE*/2661 /* Modify the flags of a page and invalidate the code if necessary. 2662 The flag PAGE_WRITE_ORG is positioned automatically depending 2663 on PAGE_WRITE. The mmap_lock should already be held. */ 2514 2664 void page_set_flags(target_ulong start, target_ulong end, int flags) 2515 2665 { 2516 PageDesc *p; 2517 target_ulong addr; 2518 2519 /* mmap_lock should already be held. */ 2666 target_ulong addr, len; 2667 2668 /* This function should never be called with addresses outside the 2669 guest address space. If this assert fires, it probably indicates 2670 a missing call to h2g_valid. */ 2671 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2672 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2673 #endif 2674 assert(start < end); 2675 2520 2676 start = start & TARGET_PAGE_MASK; 2521 2677 end = TARGET_PAGE_ALIGN(end); 2522 if (flags & PAGE_WRITE) 2678 2679 if (flags & PAGE_WRITE) { 2523 2680 flags |= PAGE_WRITE_ORG; 2681 } 2682 2524 2683 #ifdef VBOX 2525 2684 AssertMsgFailed(("We shouldn't be here, and if we should, we must have an env to do the proper locking!\n")); 2526 2685 #endif 2527 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { 2528 p = page_find_alloc(addr >> TARGET_PAGE_BITS); 2529 /* We may be called for host regions that are outside guest 2530 address space. */ 2531 if (!p) 2532 return; 2533 /* if the write protection is set, then we invalidate the code 2534 inside */ 2686 for (addr = start, len = end - start; 2687 len != 0; 2688 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2689 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); 2690 2691 /* If the write protection bit is set, then we invalidate 2692 the code inside. */ 2535 2693 if (!(p->flags & PAGE_WRITE) && 2536 2694 (flags & PAGE_WRITE) && … … 2548 2706 target_ulong addr; 2549 2707 2550 if (start + len < start) 2551 /* we've wrapped around */ 2708 /* This function should never be called with addresses outside the 2709 guest address space. If this assert fires, it probably indicates 2710 a missing call to h2g_valid. */ 2711 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS 2712 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); 2713 #endif 2714 2715 if (len == 0) { 2716 return 0; 2717 } 2718 if (start + len - 1 < start) { 2719 /* We've wrapped around. 
*/ 2552 2720 return -1; 2721 } 2553 2722 2554 2723 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */ 2555 2724 start = start & TARGET_PAGE_MASK; 2556 2725 2557 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) { 2726 for (addr = start, len = end - start; 2727 len != 0; 2728 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { 2558 2729 p = page_find(addr >> TARGET_PAGE_BITS); 2559 2730 if( !p ) … … 2583 2754 int page_unprotect(target_ulong address, unsigned long pc, void *puc) 2584 2755 { 2585 unsigned int p age_index, prot, pindex;2586 PageDesc *p , *p1;2756 unsigned int prot; 2757 PageDesc *p; 2587 2758 target_ulong host_start, host_end, addr; 2588 2759 … … 2592 2763 mmap_lock(); 2593 2764 2594 host_start = address & qemu_host_page_mask; 2595 page_index = host_start >> TARGET_PAGE_BITS; 2596 p1 = page_find(page_index); 2597 if (!p1) { 2765 p = page_find(address >> TARGET_PAGE_BITS); 2766 if (!p) { 2598 2767 mmap_unlock(); 2599 2768 return 0; 2600 2769 } 2601 host_end = host_start + qemu_host_page_size; 2602 p = p1; 2603 prot = 0; 2604 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) { 2605 prot |= p->flags; 2606 p++; 2607 } 2770 2608 2771 /* if the page was really writable, then we change its 2609 2772 protection back to writable */ 2610 if (prot & PAGE_WRITE_ORG) { 2611 pindex = (address - host_start) >> TARGET_PAGE_BITS; 2612 if (!(p1[pindex].flags & PAGE_WRITE)) { 2613 mprotect((void *)g2h(host_start), qemu_host_page_size, 2614 (prot & PAGE_BITS) | PAGE_WRITE); 2615 p1[pindex].flags |= PAGE_WRITE; 2773 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { 2774 host_start = address & qemu_host_page_mask; 2775 host_end = host_start + qemu_host_page_size; 2776 2777 prot = 0; 2778 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { 2779 p = page_find(addr >> TARGET_PAGE_BITS); 2780 p->flags |= PAGE_WRITE; 2781 prot |= p->flags; 2782 2616 2783 /* and since the content will be modified, we must invalidate 2617 2784 the corresponding translated code. 
*/ 2618 tb_invalidate_phys_page(addr ess, pc, puc);2785 tb_invalidate_phys_page(addr, pc, puc); 2619 2786 #ifdef DEBUG_TB_CHECK 2620 tb_invalidate_check(address); 2621 #endif 2622 mmap_unlock(); 2623 return 1; 2624 } 2787 tb_invalidate_check(addr); 2788 #endif 2789 } 2790 mprotect((void *)g2h(host_start), qemu_host_page_size, 2791 prot & PAGE_BITS); 2792 2793 mmap_unlock(); 2794 return 1; 2625 2795 } 2626 2796 mmap_unlock(); … … 2636 2806 #if !defined(CONFIG_USER_ONLY) 2637 2807 2808 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK) 2809 typedef struct subpage_t { 2810 target_phys_addr_t base; 2811 ram_addr_t sub_io_index[TARGET_PAGE_SIZE]; 2812 ram_addr_t region_offset[TARGET_PAGE_SIZE]; 2813 } subpage_t; 2814 2638 2815 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end, 2639 2816 ram_addr_t memory, ram_addr_t region_offset); 2640 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2641 ram_addr_t orig_memory, ram_addr_t region_offset); 2817 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 2818 ram_addr_t orig_memory, 2819 ram_addr_t region_offset); 2642 2820 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \ 2643 2821 need_subpage) \ … … 2677 2855 CPUState *env; 2678 2856 ram_addr_t orig_size = size; 2679 void *subpage; 2680 2681 if (kvm_enabled()) 2682 kvm_set_phys_mem(start_addr, size, phys_offset); 2857 subpage_t *subpage; 2858 2859 #ifndef VBOX 2860 cpu_notify_set_memory(start_addr, size, phys_offset); 2861 #endif /* !VBOX */ 2683 2862 2684 2863 if (phys_offset == IO_MEM_UNASSIGNED) { … … 2697 2876 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, 2698 2877 need_subpage); 2699 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {2878 if (need_subpage) { 2700 2879 if (!(orig_memory & IO_MEM_SUBPAGE)) { 2701 2880 subpage = subpage_init((addr & TARGET_PAGE_MASK), … … 2729 2908 end_addr2, need_subpage); 2730 2909 2731 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {2910 if (need_subpage) { 2732 2911 subpage = subpage_init((addr & TARGET_PAGE_MASK), 2733 2912 &p->phys_offset, IO_MEM_UNASSIGNED, … … 2762 2941 2763 2942 #ifndef VBOX 2943 2764 2944 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size) 2765 2945 { … … 2774 2954 } 2775 2955 2776 ram_addr_t qemu_ram_alloc(ram_addr_t size) 2777 { 2778 RAMBlock *new_block; 2956 void qemu_flush_coalesced_mmio_buffer(void) 2957 { 2958 if (kvm_enabled()) 2959 kvm_flush_coalesced_mmio_buffer(); 2960 } 2961 2962 #if defined(__linux__) && !defined(TARGET_S390X) 2963 2964 #include <sys/vfs.h> 2965 2966 #define HUGETLBFS_MAGIC 0x958458f6 2967 2968 static long gethugepagesize(const char *path) 2969 { 2970 struct statfs fs; 2971 int ret; 2972 2973 do { 2974 ret = statfs(path, &fs); 2975 } while (ret != 0 && errno == EINTR); 2976 2977 if (ret != 0) { 2978 perror(path); 2979 return 0; 2980 } 2981 2982 if (fs.f_type != HUGETLBFS_MAGIC) 2983 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path); 2984 2985 return fs.f_bsize; 2986 } 2987 2988 static void *file_ram_alloc(RAMBlock *block, 2989 ram_addr_t memory, 2990 const char *path) 2991 { 2992 char *filename; 2993 void *area; 2994 int fd; 2995 #ifdef MAP_POPULATE 2996 int flags; 2997 #endif 2998 unsigned long hpagesize; 2999 3000 hpagesize = gethugepagesize(path); 3001 if (!hpagesize) { 3002 return NULL; 3003 } 3004 3005 if (memory < hpagesize) { 3006 return NULL; 3007 } 3008 3009 if (kvm_enabled() && !kvm_has_sync_mmu()) { 3010 fprintf(stderr, "host lacks kvm mmu 
notifiers, -mem-path unsupported\n"); 3011 return NULL; 3012 } 3013 3014 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) { 3015 return NULL; 3016 } 3017 3018 fd = mkstemp(filename); 3019 if (fd < 0) { 3020 perror("unable to create backing store for hugepages"); 3021 free(filename); 3022 return NULL; 3023 } 3024 unlink(filename); 3025 free(filename); 3026 3027 memory = (memory+hpagesize-1) & ~(hpagesize-1); 3028 3029 /* 3030 * ftruncate is not supported by hugetlbfs in older 3031 * hosts, so don't bother bailing out on errors. 3032 * If anything goes wrong with it under other filesystems, 3033 * mmap will fail. 3034 */ 3035 if (ftruncate(fd, memory)) 3036 perror("ftruncate"); 3037 3038 #ifdef MAP_POPULATE 3039 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case 3040 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED 3041 * to sidestep this quirk. 3042 */ 3043 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE; 3044 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0); 3045 #else 3046 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); 3047 #endif 3048 if (area == MAP_FAILED) { 3049 perror("file_ram_alloc: can't mmap RAM pages"); 3050 close(fd); 3051 return (NULL); 3052 } 3053 block->fd = fd; 3054 return area; 3055 } 3056 #endif 3057 3058 static ram_addr_t find_ram_offset(ram_addr_t size) 3059 { 3060 RAMBlock *block, *next_block; 3061 ram_addr_t offset = 0, mingap = ULONG_MAX; 3062 3063 if (QLIST_EMPTY(&ram_list.blocks)) 3064 return 0; 3065 3066 QLIST_FOREACH(block, &ram_list.blocks, next) { 3067 ram_addr_t end, next = ULONG_MAX; 3068 3069 end = block->offset + block->length; 3070 3071 QLIST_FOREACH(next_block, &ram_list.blocks, next) { 3072 if (next_block->offset >= end) { 3073 next = MIN(next, next_block->offset); 3074 } 3075 } 3076 if (next - end >= size && next - end < mingap) { 3077 offset = end; 3078 mingap = next - end; 3079 } 3080 } 3081 return offset; 3082 } 3083 3084 static ram_addr_t last_ram_offset(void) 3085 { 3086 RAMBlock *block; 3087 ram_addr_t last = 0; 3088 3089 QLIST_FOREACH(block, &ram_list.blocks, next) 3090 last = MAX(last, block->offset + block->length); 3091 3092 return last; 3093 } 3094 3095 ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, 3096 ram_addr_t size, void *host) 3097 { 3098 RAMBlock *new_block, *block; 2779 3099 2780 3100 size = TARGET_PAGE_ALIGN(size); 2781 new_block = qemu_malloc(sizeof(*new_block)); 2782 2783 #if defined(TARGET_S390X) && defined(CONFIG_KVM) 2784 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */ 2785 new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE, 2786 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 2787 #else 2788 new_block->host = qemu_vmalloc(size); 2789 #endif 2790 #ifdef MADV_MERGEABLE 2791 madvise(new_block->host, size, MADV_MERGEABLE); 2792 #endif 2793 new_block->offset = last_ram_offset; 3101 new_block = qemu_mallocz(sizeof(*new_block)); 3102 3103 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { 3104 char *id = dev->parent_bus->info->get_dev_path(dev); 3105 if (id) { 3106 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); 3107 qemu_free(id); 3108 } 3109 } 3110 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); 3111 3112 QLIST_FOREACH(block, &ram_list.blocks, next) { 3113 if (!strcmp(block->idstr, new_block->idstr)) { 3114 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", 3115 new_block->idstr); 3116 abort(); 3117 } 3118 } 3119 
3120 new_block->host = host; 3121 3122 new_block->offset = find_ram_offset(size); 2794 3123 new_block->length = size; 2795 3124 2796 new_block->next = ram_blocks; 2797 ram_blocks = new_block; 2798 2799 phys_ram_dirty = qemu_realloc(phys_ram_dirty, 2800 (last_ram_offset + size) >> TARGET_PAGE_BITS); 2801 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS), 3125 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); 3126 3127 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty, 3128 last_ram_offset() >> TARGET_PAGE_BITS); 3129 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), 2802 3130 0xff, size >> TARGET_PAGE_BITS); 2803 2804 last_ram_offset += size;2805 3131 2806 3132 if (kvm_enabled()) … … 2810 3136 } 2811 3137 3138 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size) 3139 { 3140 RAMBlock *new_block, *block; 3141 3142 size = TARGET_PAGE_ALIGN(size); 3143 new_block = qemu_mallocz(sizeof(*new_block)); 3144 3145 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) { 3146 char *id = dev->parent_bus->info->get_dev_path(dev); 3147 if (id) { 3148 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id); 3149 qemu_free(id); 3150 } 3151 } 3152 pstrcat(new_block->idstr, sizeof(new_block->idstr), name); 3153 3154 QLIST_FOREACH(block, &ram_list.blocks, next) { 3155 if (!strcmp(block->idstr, new_block->idstr)) { 3156 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", 3157 new_block->idstr); 3158 abort(); 3159 } 3160 } 3161 3162 if (mem_path) { 3163 #if defined (__linux__) && !defined(TARGET_S390X) 3164 new_block->host = file_ram_alloc(new_block, size, mem_path); 3165 if (!new_block->host) { 3166 new_block->host = qemu_vmalloc(size); 3167 #ifdef MADV_MERGEABLE 3168 madvise(new_block->host, size, MADV_MERGEABLE); 3169 #endif 3170 } 3171 #else 3172 fprintf(stderr, "-mem-path option unsupported\n"); 3173 exit(1); 3174 #endif 3175 } else { 3176 #if defined(TARGET_S390X) && defined(CONFIG_KVM) 3177 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */ 3178 new_block->host = mmap((void*)0x1000000, size, 3179 PROT_EXEC|PROT_READ|PROT_WRITE, 3180 MAP_SHARED | MAP_ANONYMOUS, -1, 0); 3181 #else 3182 new_block->host = qemu_vmalloc(size); 3183 #endif 3184 #ifdef MADV_MERGEABLE 3185 madvise(new_block->host, size, MADV_MERGEABLE); 3186 #endif 3187 } 3188 new_block->offset = find_ram_offset(size); 3189 new_block->length = size; 3190 3191 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); 3192 3193 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty, 3194 last_ram_offset() >> TARGET_PAGE_BITS); 3195 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS), 3196 0xff, size >> TARGET_PAGE_BITS); 3197 3198 if (kvm_enabled()) 3199 kvm_setup_guest_memory(new_block->host, size); 3200 3201 return new_block->offset; 3202 } 3203 2812 3204 void qemu_ram_free(ram_addr_t addr) 2813 3205 { 2814 /* TODO: implement this. 
*/ 3206 RAMBlock *block; 3207 3208 QLIST_FOREACH(block, &ram_list.blocks, next) { 3209 if (addr == block->offset) { 3210 QLIST_REMOVE(block, next); 3211 if (mem_path) { 3212 #if defined (__linux__) && !defined(TARGET_S390X) 3213 if (block->fd) { 3214 munmap(block->host, block->length); 3215 close(block->fd); 3216 } else { 3217 qemu_vfree(block->host); 3218 } 3219 #endif 3220 } else { 3221 #if defined(TARGET_S390X) && defined(CONFIG_KVM) 3222 munmap(block->host, block->length); 3223 #else 3224 qemu_vfree(block->host); 3225 #endif 3226 } 3227 qemu_free(block); 3228 return; 3229 } 3230 } 3231 2815 3232 } 2816 3233 … … 2825 3242 void *qemu_get_ram_ptr(ram_addr_t addr) 2826 3243 { 2827 RAMBlock *prev;2828 RAMBlock **prevp;2829 3244 RAMBlock *block; 2830 3245 2831 prev = NULL; 2832 prevp = &ram_blocks; 2833 block = ram_blocks; 2834 while (block && (block->offset > addr 2835 || block->offset + block->length <= addr)) { 2836 if (prev) 2837 prevp = &prev->next; 2838 prev = block; 2839 block = block->next; 2840 } 2841 if (!block) { 2842 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 2843 abort(); 2844 } 2845 /* Move this entry to to start of the list. */ 2846 if (prev) { 2847 prev->next = block->next; 2848 block->next = *prevp; 2849 *prevp = block; 2850 } 2851 return block->host + (addr - block->offset); 3246 QLIST_FOREACH(block, &ram_list.blocks, next) { 3247 if (addr - block->offset < block->length) { 3248 QLIST_REMOVE(block, next); 3249 QLIST_INSERT_HEAD(&ram_list.blocks, block, next); 3250 return block->host + (addr - block->offset); 3251 } 3252 } 3253 3254 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); 3255 abort(); 3256 3257 return NULL; 2852 3258 } 2853 3259 … … 2856 3262 ram_addr_t qemu_ram_addr_from_host(void *ptr) 2857 3263 { 2858 RAMBlock *prev;2859 RAMBlock **prevp;2860 3264 RAMBlock *block; 2861 3265 uint8_t *host = ptr; 2862 3266 2863 prev = NULL; 2864 prevp = &ram_blocks; 2865 block = ram_blocks; 2866 while (block && (block->host > host 2867 || block->host + block->length <= host)) { 2868 if (prev) 2869 prevp = &prev->next; 2870 prev = block; 2871 block = block->next; 2872 } 2873 if (!block) { 2874 fprintf(stderr, "Bad ram pointer %p\n", ptr); 2875 abort(); 2876 } 2877 return block->offset + (host - block->host); 3267 QLIST_FOREACH(block, &ram_list.blocks, next) { 3268 if (host - block->host < block->length) { 3269 return block->offset + (host - block->host); 3270 } 3271 } 3272 3273 fprintf(stderr, "Bad ram pointer %p\n", ptr); 3274 abort(); 3275 3276 return 0; 2878 3277 } 2879 3278 … … 2959 3358 { 2960 3359 int dirty_flags; 2961 #ifdef VBOX 2962 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2963 dirty_flags = 0xff; 2964 else 2965 #endif /* VBOX */ 2966 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3360 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 2967 3361 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 2968 3362 #if !defined(CONFIG_USER_ONLY) 2969 3363 tb_invalidate_phys_page_fast(ram_addr, 1); 2970 # ifdef VBOX 2971 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 2972 dirty_flags = 0xff; 2973 else 2974 # endif /* VBOX */ 2975 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3364 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 2976 3365 #endif 2977 3366 } … … 2981 3370 stb_p(qemu_get_ram_ptr(ram_addr), val); 2982 3371 #endif 2983 #ifdef CONFIG_KQEMU2984 if (cpu_single_env->kqemu_enabled &&2985 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != 
KQEMU_MODIFY_PAGE_MASK)2986 kqemu_modify_page(cpu_single_env, ram_addr);2987 #endif2988 3372 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 2989 #ifdef VBOX 2990 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 2991 #endif /* !VBOX */ 2992 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 3373 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 2993 3374 /* we remove the notdirty callback only if the code has been 2994 3375 flushed */ … … 3001 3382 { 3002 3383 int dirty_flags; 3003 #ifdef VBOX 3004 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3005 dirty_flags = 0xff; 3006 else 3007 #endif /* VBOX */ 3008 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3384 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3009 3385 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 3010 3386 #if !defined(CONFIG_USER_ONLY) 3011 3387 tb_invalidate_phys_page_fast(ram_addr, 2); 3012 # ifdef VBOX 3013 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3014 dirty_flags = 0xff; 3015 else 3016 # endif /* VBOX */ 3017 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3388 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3018 3389 #endif 3019 3390 } … … 3024 3395 #endif 3025 3396 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3026 #ifdef VBOX 3027 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3028 #endif 3029 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 3397 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 3030 3398 /* we remove the notdirty callback only if the code has been 3031 3399 flushed */ … … 3038 3406 { 3039 3407 int dirty_flags; 3040 #ifdef VBOX 3041 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3042 dirty_flags = 0xff; 3043 else 3044 #endif /* VBOX */ 3045 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3408 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3046 3409 if (!(dirty_flags & CODE_DIRTY_FLAG)) { 3047 3410 #if !defined(CONFIG_USER_ONLY) 3048 3411 tb_invalidate_phys_page_fast(ram_addr, 4); 3049 # ifdef VBOX 3050 if (RT_UNLIKELY((ram_addr >> TARGET_PAGE_BITS) >= phys_ram_dirty_size)) 3051 dirty_flags = 0xff; 3052 else 3053 # endif /* VBOX */ 3054 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS]; 3412 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr); 3055 3413 #endif 3056 3414 } … … 3061 3419 #endif 3062 3420 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG); 3063 #ifdef VBOX 3064 if (RT_LIKELY((ram_addr >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3065 #endif 3066 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags; 3421 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags); 3067 3422 /* we remove the notdirty callback only if the code has been 3068 3423 flushed */ … … 3182 3537 }; 3183 3538 3184 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr, 3185 unsigned int len) 3186 { 3187 uint32_t ret; 3188 unsigned int idx; 3189 3190 idx = SUBPAGE_IDX(addr); 3539 static inline uint32_t subpage_readlen (subpage_t *mmio, 3540 target_phys_addr_t addr, 3541 unsigned int len) 3542 { 3543 unsigned int idx = SUBPAGE_IDX(addr); 3191 3544 #if defined(DEBUG_SUBPAGE) 3192 3545 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__, 3193 3546 mmio, len, addr, idx); 3194 3547 #endif 3195 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], 3196 addr + mmio->region_offset[idx][0][len]);3197 3198 return ret;3548 3549 addr += 
mmio->region_offset[idx]; 3550 idx = mmio->sub_io_index[idx]; 3551 return io_mem_read[idx][len](io_mem_opaque[idx], addr); 3199 3552 } 3200 3553 3201 3554 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr, 3202 uint32_t value, unsigned int len) 3203 { 3204 unsigned int idx; 3205 3206 idx = SUBPAGE_IDX(addr); 3555 uint32_t value, unsigned int len) 3556 { 3557 unsigned int idx = SUBPAGE_IDX(addr); 3207 3558 #if defined(DEBUG_SUBPAGE) 3208 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__, 3209 mmio, len, addr, idx, value); 3210 #endif 3211 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], 3212 addr + mmio->region_offset[idx][1][len], 3213 value); 3559 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", 3560 __func__, mmio, len, addr, idx, value); 3561 #endif 3562 3563 addr += mmio->region_offset[idx]; 3564 idx = mmio->sub_io_index[idx]; 3565 io_mem_write[idx][len](io_mem_opaque[idx], addr, value); 3214 3566 } 3215 3567 3216 3568 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr) 3217 3569 { 3218 #if defined(DEBUG_SUBPAGE)3219 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);3220 #endif3221 3222 3570 return subpage_readlen(opaque, addr, 0); 3223 3571 } … … 3226 3574 uint32_t value) 3227 3575 { 3228 #if defined(DEBUG_SUBPAGE)3229 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);3230 #endif3231 3576 subpage_writelen(opaque, addr, value, 0); 3232 3577 } … … 3234 3579 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr) 3235 3580 { 3236 #if defined(DEBUG_SUBPAGE)3237 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);3238 #endif3239 3240 3581 return subpage_readlen(opaque, addr, 1); 3241 3582 } … … 3244 3585 uint32_t value) 3245 3586 { 3246 #if defined(DEBUG_SUBPAGE)3247 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);3248 #endif3249 3587 subpage_writelen(opaque, addr, value, 1); 3250 3588 } … … 3252 3590 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr) 3253 3591 { 3254 #if defined(DEBUG_SUBPAGE)3255 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);3256 #endif3257 3258 3592 return subpage_readlen(opaque, addr, 2); 3259 3593 } 3260 3594 3261 static void subpage_writel (void *opaque, 3262 target_phys_addr_t addr, uint32_t value) 3263 { 3264 #if defined(DEBUG_SUBPAGE) 3265 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value); 3266 #endif 3595 static void subpage_writel (void *opaque, target_phys_addr_t addr, 3596 uint32_t value) 3597 { 3267 3598 subpage_writelen(opaque, addr, value, 2); 3268 3599 } … … 3284 3615 { 3285 3616 int idx, eidx; 3286 unsigned int i;3287 3617 3288 3618 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) … … 3294 3624 mmio, start, end, idx, eidx, memory); 3295 3625 #endif 3296 memory >>= IO_MEM_SHIFT;3626 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 3297 3627 for (; idx <= eidx; idx++) { 3298 for (i = 0; i < 4; i++) { 3299 if (io_mem_read[memory][i]) { 3300 mmio->mem_read[idx][i] = &io_mem_read[memory][i]; 3301 mmio->opaque[idx][0][i] = io_mem_opaque[memory]; 3302 mmio->region_offset[idx][0][i] = region_offset; 3303 } 3304 if (io_mem_write[memory][i]) { 3305 mmio->mem_write[idx][i] = &io_mem_write[memory][i]; 3306 mmio->opaque[idx][1][i] = io_mem_opaque[memory]; 3307 mmio->region_offset[idx][1][i] = region_offset; 3308 } 3309 } 3628 mmio->sub_io_index[idx] = memory; 3629 mmio->region_offset[idx] = region_offset; 3310 3630 
} 3311 3631 … … 3313 3633 } 3314 3634 3315 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 3316 ram_addr_t orig_memory, ram_addr_t region_offset) 3635 static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys, 3636 ram_addr_t orig_memory, 3637 ram_addr_t region_offset) 3317 3638 { 3318 3639 subpage_t *mmio; … … 3328 3649 #endif 3329 3650 *phys = subpage_memory | IO_MEM_SUBPAGE; 3330 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory, 3331 region_offset); 3651 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset); 3332 3652 3333 3653 return mmio; … … 3343 3663 return i; 3344 3664 } 3345 3665 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES); 3346 3666 return -1; 3347 3667 } … … 3359 3679 void *opaque) 3360 3680 { 3361 int i , subwidth = 0;3681 int i; 3362 3682 3363 3683 if (io_index <= 0) { … … 3371 3691 } 3372 3692 3373 for(i = 0;i < 3; i++) { 3374 if (!mem_read[i] || !mem_write[i]) 3375 subwidth = IO_MEM_SUBWIDTH; 3376 io_mem_read[io_index][i] = mem_read[i]; 3377 io_mem_write[io_index][i] = mem_write[i]; 3693 for (i = 0; i < 3; ++i) { 3694 io_mem_read[io_index][i] 3695 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]); 3696 } 3697 for (i = 0; i < 3; ++i) { 3698 io_mem_write[io_index][i] 3699 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]); 3378 3700 } 3379 3701 io_mem_opaque[io_index] = opaque; 3380 return (io_index << IO_MEM_SHIFT) | subwidth; 3702 3703 return (io_index << IO_MEM_SHIFT); 3381 3704 } 3382 3705 … … 3419 3742 /* physical memory access (slow version, mainly for debug) */ 3420 3743 #if defined(CONFIG_USER_ONLY) 3421 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,3422 3744 int cpu_memory_rw_debug(CPUState *env, target_ulong addr, 3745 uint8_t *buf, int len, int is_write) 3423 3746 { 3424 3747 int l, flags; … … 3433 3756 flags = page_get_flags(page); 3434 3757 if (!(flags & PAGE_VALID)) 3435 return ;3758 return -1; 3436 3759 if (is_write) { 3437 3760 if (!(flags & PAGE_WRITE)) 3438 return ;3761 return -1; 3439 3762 /* XXX: this code should not depend on lock_user */ 3440 3763 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) 3441 /* FIXME - should this return an error rather than just fail? */ 3442 return; 3764 return -1; 3443 3765 memcpy(p, buf, l); 3444 3766 unlock_user(p, addr, l); 3445 3767 } else { 3446 3768 if (!(flags & PAGE_READ)) 3447 return ;3769 return -1; 3448 3770 /* XXX: this code should not depend on lock_user */ 3449 3771 if (!(p = lock_user(VERIFY_READ, addr, l, 1))) 3450 /* FIXME - should this return an error rather than just fail? 
*/ 3451 return; 3772 return -1; 3452 3773 memcpy(buf, p, l); 3453 3774 unlock_user(p, addr, 0); … … 3457 3778 addr += l; 3458 3779 } 3780 return 0; 3459 3781 } 3460 3782 … … 3532 3854 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 3533 3855 /* set dirty bit */ 3534 #ifdef VBOX 3535 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3536 #endif 3537 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 3538 (0xff & ~CODE_DIRTY_FLAG); 3856 cpu_physical_memory_set_dirty_flags( 3857 addr1, (0xff & ~CODE_DIRTY_FLAG)); 3539 3858 } 3540 3859 } … … 3760 4079 tb_invalidate_phys_page_range(addr1, addr1 + l, 0); 3761 4080 /* set dirty bit */ 3762 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=3763 (0xff & ~CODE_DIRTY_FLAG);4081 cpu_physical_memory_set_dirty_flags( 4082 addr1, (0xff & ~CODE_DIRTY_FLAG)); 3764 4083 } 3765 4084 addr1 += l; … … 3865 4184 } 3866 4185 3867 /* XXX: optimize*/4186 /* warning: addr must be aligned */ 3868 4187 uint32_t lduw_phys(target_phys_addr_t addr) 3869 4188 { 3870 uint16_t val; 3871 cpu_physical_memory_read(addr, (uint8_t *)&val, 2); 3872 return tswap16(val); 4189 int io_index; 4190 uint8_t *ptr; 4191 uint64_t val; 4192 unsigned long pd; 4193 PhysPageDesc *p; 4194 4195 p = phys_page_find(addr >> TARGET_PAGE_BITS); 4196 if (!p) { 4197 pd = IO_MEM_UNASSIGNED; 4198 } else { 4199 pd = p->phys_offset; 4200 } 4201 4202 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && 4203 !(pd & IO_MEM_ROMD)) { 4204 /* I/O case */ 4205 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 4206 if (p) 4207 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 4208 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr); 4209 } else { 4210 /* RAM case */ 4211 #ifndef VBOX 4212 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) + 4213 (addr & ~TARGET_PAGE_MASK); 4214 val = lduw_p(ptr); 4215 #else 4216 val = remR3PhysReadU16((pd & TARGET_PAGE_MASK) | (addr & ~TARGET_PAGE_MASK)); 4217 #endif 4218 } 4219 return val; 3873 4220 } 3874 4221 … … 3910 4257 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 3911 4258 /* set dirty bit */ 3912 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=3913 (0xff & ~CODE_DIRTY_FLAG);4259 cpu_physical_memory_set_dirty_flags( 4260 addr1, (0xff & ~CODE_DIRTY_FLAG)); 3914 4261 } 3915 4262 } … … 3988 4335 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0); 3989 4336 /* set dirty bit */ 3990 #ifdef VBOX 3991 if (RT_LIKELY((addr1 >> TARGET_PAGE_BITS) < phys_ram_dirty_size)) 3992 #endif 3993 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= 3994 (0xff & ~CODE_DIRTY_FLAG); 4337 cpu_physical_memory_set_dirty_flags(addr1, 4338 (0xff & ~CODE_DIRTY_FLAG)); 3995 4339 } 3996 4340 } … … 4004 4348 } 4005 4349 4006 /* XXX: optimize*/4350 /* warning: addr must be aligned */ 4007 4351 void stw_phys(target_phys_addr_t addr, uint32_t val) 4008 4352 { 4009 uint16_t v = tswap16(val); 4010 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2); 4353 int io_index; 4354 uint8_t *ptr; 4355 unsigned long pd; 4356 PhysPageDesc *p; 4357 4358 p = phys_page_find(addr >> TARGET_PAGE_BITS); 4359 if (!p) { 4360 pd = IO_MEM_UNASSIGNED; 4361 } else { 4362 pd = p->phys_offset; 4363 } 4364 4365 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) { 4366 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1); 4367 if (p) 4368 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset; 4369 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val); 4370 } else { 4371 unsigned long addr1; 4372 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK); 4373 /* RAM case */ 4374 #ifndef VBOX 4375 ptr = 
qemu_get_ram_ptr(addr1); 4376 stw_p(ptr, val); 4377 #else 4378 remR3PhysWriteU16(addr1, val); NOREF(ptr); 4379 #endif 4380 if (!cpu_physical_memory_is_dirty(addr1)) { 4381 /* invalidate code */ 4382 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0); 4383 /* set dirty bit */ 4384 cpu_physical_memory_set_dirty_flags(addr1, 4385 (0xff & ~CODE_DIRTY_FLAG)); 4386 } 4387 } 4011 4388 } 4012 4389 … … 4017 4394 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8); 4018 4395 } 4019 4020 #endif4021 4396 4022 4397 #ifndef VBOX … … 4039 4414 l = len; 4040 4415 phys_addr += (addr & ~TARGET_PAGE_MASK); 4041 #if !defined(CONFIG_USER_ONLY)4042 4416 if (is_write) 4043 4417 cpu_physical_memory_write_rom(phys_addr, buf, l); 4044 4418 else 4045 #endif4046 4419 cpu_physical_memory_rw(phys_addr, buf, l, is_write); 4047 4420 len -= l; … … 4052 4425 } 4053 4426 #endif /* !VBOX */ 4427 #endif 4054 4428 4055 4429 /* in deterministic execution mode, instructions doing device I/Os … … 4111 4485 cpu_resume_from_signal(env, NULL); 4112 4486 } 4487 4488 #if !defined(CONFIG_USER_ONLY) 4113 4489 4114 4490 #ifndef VBOX … … 4167 4543 #endif /* !VBOX */ 4168 4544 4169 #if !defined(CONFIG_USER_ONLY)4170 4171 4545 #define MMUSUFFIX _cmmu 4172 4546 #define GETPC() NULL -
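The exec.c hunks above replace the flat phys_ram_dirty byte array with the ram_list.phys_dirty accessors and move RAM blocks onto a list whose free space is scanned by the new find_ram_offset(). The following standalone sketch models that smallest-gap-first placement with a plain array instead of QEMU's QLIST; the Block type, pick_offset() and the sample sizes are illustrative only, not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint64_t offset, length; } Block;

    /* For every block, find the nearest block that starts at or after its end
     * and measure the hole in between; the open space past the last block acts
     * as an (effectively infinite) gap, so a new block is appended at the end
     * when no interior hole fits better.  Returns 0 for an empty list. */
    static uint64_t pick_offset(const Block *blocks, int n, uint64_t size)
    {
        uint64_t offset = 0, mingap = UINT64_MAX;
        int i, j;

        for (i = 0; i < n; i++) {
            uint64_t end = blocks[i].offset + blocks[i].length;
            uint64_t next = UINT64_MAX;

            for (j = 0; j < n; j++) {
                if (blocks[j].offset >= end && blocks[j].offset < next)
                    next = blocks[j].offset;
            }
            if (next - end >= size && next - end < mingap) {
                offset = end;
                mingap = next - end;
            }
        }
        return offset;
    }

    int main(void)
    {
        Block b[] = { { 0x000000, 0x100000 }, { 0x300000, 0x100000 } };
        /* a 512 KiB block fits into the 2 MiB hole that starts at 0x100000 */
        printf("placed at %#llx\n", (unsigned long long)pick_offset(b, 2, 0x80000));
        return 0;
    }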
trunk/src/recompiler/fpu/softfloat-native.c
r37675 r37689 13 13 (defined(CONFIG_SOLARIS) && (CONFIG_SOLARIS_VERSION < 10 || CONFIG_SOLARIS_VERSION == 11)) /* VBOX adds sol 11 */ 14 14 fpsetround(val); 15 #elif defined(__arm__)16 /* nothing to do */17 15 #else 18 16 fesetround(val); … … 36 34 #define remainderf(fa, fb) ((float)remainder(fa, fb)) 37 35 #define rintf(f) ((float)rint(f)) 38 /* Some defines which only apply to *BSD */ 39 # if defined(VBOX) && defined(HOST_BSD) 36 # if defined(VBOX) && defined(HOST_BSD) /* Some defines which only apply to *BSD */ 40 37 # define lrintl(f) ((int32_t)rint(f)) 41 38 # define llrintl(f) ((int64_t)rint(f)) … … 374 371 float64 float64_round_to_int( float64 a STATUS_PARAM ) 375 372 { 376 #if defined(__arm__)377 switch(STATUS(float_rounding_mode)) {378 default:379 case float_round_nearest_even:380 asm("rndd %0, %1" : "=f" (a) : "f"(a));381 break;382 case float_round_down:383 asm("rnddm %0, %1" : "=f" (a) : "f"(a));384 break;385 case float_round_up:386 asm("rnddp %0, %1" : "=f" (a) : "f"(a));387 break;388 case float_round_to_zero:389 asm("rnddz %0, %1" : "=f" (a) : "f"(a));390 break;391 }392 #else393 373 return rint(a); 394 #endif395 374 } 396 375 -
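In softfloat-native.c the ARM-specific rounding code is dropped, so set_float_rounding_mode() is reduced to fpsetround()/fesetround() and float64_round_to_int() to a bare rint(). A minimal sketch of why that is sufficient on a C99 host: rint() simply follows whatever direction fesetround() selected. Strictly conforming builds may additionally need the FENV_ACCESS pragma or an option such as -frounding-math.

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double x = 2.5;

        fesetround(FE_TONEAREST);   /* ties-to-even: 2.5 -> 2 */
        printf("nearest : %.1f\n", rint(x));

        fesetround(FE_UPWARD);      /* 2.5 -> 3 */
        printf("upward  : %.1f\n", rint(x));

        fesetround(FE_DOWNWARD);    /* 2.5 -> 2 */
        printf("downward: %.1f\n", rint(x));

        fesetround(FE_TOWARDZERO);  /* 2.5 -> 2 */
        printf("to zero : %.1f\n", rint(x));

        fesetround(FE_TONEAREST);   /* restore the default */
        return 0;
    }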
trunk/src/recompiler/fpu/softfloat-native.h
r37677 r37689 3 3 #include <math.h> 4 4 5 #if (defined(_BSD) && !defined(__APPLE__) && !defined(__FreeBSD__)) || defined(CONFIG_SOLARIS) /* VBox: Added __FreeBSD__ */ 5 #if (defined(CONFIG_BSD) && !defined(__APPLE__) && !defined(__GLIBC__) && !defined(__FreeBSD__)) \ 6 || defined(CONFIG_SOLARIS) /* VBox: Added __FreeBSD__ */ 6 7 #include <ieeefp.h> 7 8 #define fabsf(f) ((float)fabs(f)) … … 23 24 #if defined(CONFIG_SOLARIS) && \ 24 25 ((CONFIG_SOLARIS_VERSION <= 9 ) || \ 25 ((CONFIG_SOLARIS_VERSION >= 10) && (__GNUC__ < 4))) \26 ((CONFIG_SOLARIS_VERSION == 10) && (__GNUC__ < 4))) \ 26 27 || (defined(__OpenBSD__) && (OpenBSD < 200811)) 27 28 /* … … 56 57 57 58 58 # if !defined(VBOX) || !defined(isnormal) || !defined(isgreater) || !defined(isgreaterequal) || !defined(isless) || !defined(islessequal) || !defined(isunordered)59 59 #define isnormal(x) (fpclass(x) >= FP_NZERO) 60 60 #define isgreater(x, y) ((!unordered(x, y)) && ((x) > (y))) … … 63 63 #define islessequal(x, y) ((!unordered(x, y)) && ((x) <= (y))) 64 64 #define isunordered(x,y) unordered(x, y) 65 # endif /* !VBOX || missing */66 65 #endif 67 66 … … 128 127 float_round_up = FP_RP, 129 128 float_round_to_zero = FP_RZ 130 };131 #elif defined(__arm__)132 enum {133 float_round_nearest_even = 0,134 float_round_down = 1,135 float_round_up = 2,136 float_round_to_zero = 3137 129 }; 138 130 #else -
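softfloat-native.h keeps its fpclass()/unordered() fallbacks only for old Solaris and OpenBSD libm; on C99 hosts the same classification and comparison macros come straight from <math.h>. A short sketch of those standard macros, mainly to show the NaN behaviour the fallbacks have to reproduce (isgreater() returns 0 quietly for unordered operands, where a raw > comparison may raise FE_INVALID):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double a = 1.0, b = nan("");

        printf("isnormal(a)       = %d\n", isnormal(a));        /* 1 */
        printf("isunordered(a, b) = %d\n", isunordered(a, b));  /* 1: a NaN is involved */
        printf("isgreater(a, b)   = %d\n", isgreater(a, b));    /* 0, no exception raised */
        printf("a > b             = %d\n", a > b);              /* 0 as well */
        return 0;
    }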
trunk/src/recompiler/fpu/softfloat-specialize.h
r36175 r37689 62 62 #if defined(TARGET_SPARC) 63 63 #define float32_default_nan make_float32(0x7FFFFFFF) 64 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM) 64 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) 65 65 #define float32_default_nan make_float32(0x7FC00000) 66 66 #elif defined(TARGET_HPPA) … … 190 190 #if defined(TARGET_SPARC) 191 191 #define float64_default_nan make_float64(LIT64( 0x7FFFFFFFFFFFFFFF )) 192 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM) 192 #elif defined(TARGET_POWERPC) || defined(TARGET_ARM) || defined(TARGET_ALPHA) 193 193 #define float64_default_nan make_float64(LIT64( 0x7FF8000000000000 )) 194 194 #elif defined(TARGET_HPPA) -
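The softfloat-specialize.h hunks add TARGET_ALPHA to the targets whose default NaN is 0x7FC00000 / 0x7FF8000000000000 (quiet bit only), as opposed to SPARC's all-ones mantissa 0x7FFFFFFF. A small host-side sketch that decodes the 32-bit patterns, assuming the host uses IEEE-754 singles:

    #include <stdint.h>
    #include <string.h>
    #include <math.h>
    #include <stdio.h>

    /* Reinterpret a 32-bit pattern as a single; memcpy avoids aliasing issues. */
    static float f32_from_bits(uint32_t bits)
    {
        float f;
        memcpy(&f, &bits, sizeof f);
        return f;
    }

    int main(void)
    {
        uint32_t sparc_nan = 0x7FFFFFFF;  /* exponent all ones, all mantissa bits set */
        uint32_t arm_nan   = 0x7FC00000;  /* exponent all ones, only mantissa MSB set */

        printf("0x7FFFFFFF isnan=%d\n", isnan(f32_from_bits(sparc_nan)));
        printf("0x7FC00000 isnan=%d\n", isnan(f32_from_bits(arm_nan)));

        /* Both are quiet NaNs: mantissa bit 22 (the quiet bit) is set in each. */
        printf("quiet bit: %d %d\n",
               (int)((sparc_nan >> 22) & 1), (int)((arm_nan >> 22) & 1));
        return 0;
    }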
trunk/src/recompiler/fpu/softfloat.c
r37675 r37689 1911 1911 float32 float32_rem( float32 a, float32 b STATUS_PARAM ) 1912 1912 { 1913 flag aSign, bSign,zSign;1913 flag aSign, zSign; 1914 1914 int16 aExp, bExp, expDiff; 1915 1915 bits32 aSig, bSig; … … 1924 1924 bSig = extractFloat32Frac( b ); 1925 1925 bExp = extractFloat32Exp( b ); 1926 bSign = extractFloat32Sign( b );1927 1926 if ( aExp == 0xFF ) { 1928 1927 if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) { … … 2055 2054 return roundAndPackFloat32( 0, zExp, zSig STATUS_VAR ); 2056 2055 2056 } 2057 2058 /*---------------------------------------------------------------------------- 2059 | Returns the binary exponential of the single-precision floating-point value 2060 | `a'. The operation is performed according to the IEC/IEEE Standard for 2061 | Binary Floating-Point Arithmetic. 2062 | 2063 | Uses the following identities: 2064 | 2065 | 1. ------------------------------------------------------------------------- 2066 | x x*ln(2) 2067 | 2 = e 2068 | 2069 | 2. ------------------------------------------------------------------------- 2070 | 2 3 4 5 n 2071 | x x x x x x x 2072 | e = 1 + --- + --- + --- + --- + --- + ... + --- + ... 2073 | 1! 2! 3! 4! 5! n! 2074 *----------------------------------------------------------------------------*/ 2075 2076 static const float64 float32_exp2_coefficients[15] = 2077 { 2078 make_float64( 0x3ff0000000000000ll ), /* 1 */ 2079 make_float64( 0x3fe0000000000000ll ), /* 2 */ 2080 make_float64( 0x3fc5555555555555ll ), /* 3 */ 2081 make_float64( 0x3fa5555555555555ll ), /* 4 */ 2082 make_float64( 0x3f81111111111111ll ), /* 5 */ 2083 make_float64( 0x3f56c16c16c16c17ll ), /* 6 */ 2084 make_float64( 0x3f2a01a01a01a01all ), /* 7 */ 2085 make_float64( 0x3efa01a01a01a01all ), /* 8 */ 2086 make_float64( 0x3ec71de3a556c734ll ), /* 9 */ 2087 make_float64( 0x3e927e4fb7789f5cll ), /* 10 */ 2088 make_float64( 0x3e5ae64567f544e4ll ), /* 11 */ 2089 make_float64( 0x3e21eed8eff8d898ll ), /* 12 */ 2090 make_float64( 0x3de6124613a86d09ll ), /* 13 */ 2091 make_float64( 0x3da93974a8c07c9dll ), /* 14 */ 2092 make_float64( 0x3d6ae7f3e733b81fll ), /* 15 */ 2093 }; 2094 2095 float32 float32_exp2( float32 a STATUS_PARAM ) 2096 { 2097 flag aSign; 2098 int16 aExp; 2099 bits32 aSig; 2100 float64 r, x, xn; 2101 int i; 2102 2103 aSig = extractFloat32Frac( a ); 2104 aExp = extractFloat32Exp( a ); 2105 aSign = extractFloat32Sign( a ); 2106 2107 if ( aExp == 0xFF) { 2108 if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR ); 2109 return (aSign) ? 
float32_zero : a; 2110 } 2111 if (aExp == 0) { 2112 if (aSig == 0) return float32_one; 2113 } 2114 2115 float_raise( float_flag_inexact STATUS_VAR); 2116 2117 /* ******************************* */ 2118 /* using float64 for approximation */ 2119 /* ******************************* */ 2120 x = float32_to_float64(a STATUS_VAR); 2121 x = float64_mul(x, float64_ln2 STATUS_VAR); 2122 2123 xn = x; 2124 r = float64_one; 2125 for (i = 0 ; i < 15 ; i++) { 2126 float64 f; 2127 2128 f = float64_mul(xn, float32_exp2_coefficients[i] STATUS_VAR); 2129 r = float64_add(r, f STATUS_VAR); 2130 2131 xn = float64_mul(xn, x STATUS_VAR); 2132 } 2133 2134 return float64_to_float32(r, status); 2057 2135 } 2058 2136 … … 3063 3141 float64 float64_rem( float64 a, float64 b STATUS_PARAM ) 3064 3142 { 3065 flag aSign, bSign,zSign;3143 flag aSign, zSign; 3066 3144 int16 aExp, bExp, expDiff; 3067 3145 bits64 aSig, bSig; … … 3074 3152 bSig = extractFloat64Frac( b ); 3075 3153 bExp = extractFloat64Exp( b ); 3076 bSign = extractFloat64Sign( b );3077 3154 if ( aExp == 0x7FF ) { 3078 3155 if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) { … … 4033 4110 floatx80 floatx80_rem( floatx80 a, floatx80 b STATUS_PARAM ) 4034 4111 { 4035 flag aSign, bSign,zSign;4112 flag aSign, zSign; 4036 4113 int32 aExp, bExp, expDiff; 4037 4114 bits64 aSig0, aSig1, bSig; … … 4044 4121 bSig = extractFloatx80Frac( b ); 4045 4122 bExp = extractFloatx80Exp( b ); 4046 bSign = extractFloatx80Sign( b );4047 4123 if ( aExp == 0x7FFF ) { 4048 4124 if ( (bits64) ( aSig0<<1 ) … … 5145 5221 float128 float128_rem( float128 a, float128 b STATUS_PARAM ) 5146 5222 { 5147 flag aSign, bSign,zSign;5223 flag aSign, zSign; 5148 5224 int32 aExp, bExp, expDiff; 5149 5225 bits64 aSig0, aSig1, bSig0, bSig1, q, term0, term1, term2; … … 5159 5235 bSig0 = extractFloat128Frac0( b ); 5160 5236 bExp = extractFloat128Exp( b ); 5161 bSign = extractFloat128Sign( b );5162 5237 if ( aExp == 0x7FFF ) { 5163 5238 if ( ( aSig0 | aSig1 ) -
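The largest addition to softfloat.c is float32_exp2(), built on the two identities quoted in its comment block: 2^x = e^(x ln 2), and the Taylor series of e^x whose terms are x^n/n!; the 15-entry coefficient table holds exactly those 1/n! values as float64 bit patterns. A host-double sketch of the same approximation, useful for checking the series against the libm exp2():

    #include <math.h>
    #include <stdio.h>

    static double exp2_series(double a)
    {
        const double ln2 = 0.69314718055994530942;
        double x = a * ln2;              /* identity 1: 2^a = e^(a ln 2)      */
        double xn = x, r = 1.0, fact = 1.0;
        int n;

        for (n = 1; n <= 15; n++) {      /* identity 2: e^x = 1 + sum x^n/n!  */
            fact *= n;                   /* n!                                */
            r += xn / fact;              /* accumulate x^n / n!               */
            xn *= x;                     /* next power of x                   */
        }
        return r;
    }

    int main(void)
    {
        double a;
        for (a = -1.0; a <= 4.0; a += 1.0)
            printf("a=%4.1f  series=%.9f  exp2=%.9f\n", a, exp2_series(a), exp2(a));
        return 0;
    }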
trunk/src/recompiler/fpu/softfloat.h
r37675 r37689 284 284 float32 float32_rem( float32, float32 STATUS_PARAM ); 285 285 float32 float32_sqrt( float32 STATUS_PARAM ); 286 float32 float32_exp2( float32 STATUS_PARAM ); 286 287 float32 float32_log2( float32 STATUS_PARAM ); 287 288 int float32_eq( float32, float32 STATUS_PARAM ); … … 324 325 #define float32_zero make_float32(0) 325 326 #define float32_one make_float32(0x3f800000) 327 #define float32_ln2 make_float32(0x3f317218) 326 328 327 329 /*---------------------------------------------------------------------------- … … 395 397 #define float64_zero make_float64(0) 396 398 #define float64_one make_float64(0x3ff0000000000000LL) 399 #define float64_ln2 make_float64(0x3fe62e42fefa39efLL) 397 400 398 401 #ifdef FLOATX80 -
trunk/src/recompiler/gen-icount.h
r37675 r37689 1 #include "qemu-timer.h" 2 1 3 /* Helpers for instruction counting code generation. */ 2 4 -
trunk/src/recompiler/ioport.h
r37675 r37689 52 52 uint32_t cpu_inl(pio_addr_t addr); 53 53 #else 54 void cpu_outb(CPU State *env, pio_addr_t addr, uint8_t val);55 void cpu_outw(CPU State *env, pio_addr_t addr, uint16_t val);56 void cpu_outl(CPU State *env, pio_addr_t addr, uint32_t val);57 uint8_t cpu_inb(CPU State *env, pio_addr_t addr);58 uint16_t cpu_inw(CPU State *env, pio_addr_t addr);59 uint32_t cpu_inl(CPU State *env, pio_addr_t addr);54 void cpu_outb(CPUX86State *env, pio_addr_t addr, uint8_t val); 55 void cpu_outw(CPUX86State *env, pio_addr_t addr, uint16_t val); 56 void cpu_outl(CPUX86State *env, pio_addr_t addr, uint32_t val); 57 uint8_t cpu_inb(CPUX86State *env, pio_addr_t addr); 58 uint16_t cpu_inw(CPUX86State *env, pio_addr_t addr); 59 uint32_t cpu_inl(CPUX86State *env, pio_addr_t addr); 60 60 #endif 61 61 -
trunk/src/recompiler/qemu-common.h
r37675 r37689 39 39 # define QEMU_WARN_UNUSED_RESULT 40 40 # endif 41 #define QEMU_BUILD_BUG_ON(x) typedef char __build_bug_on__##__LINE__[(x)?-1:1]; 42 43 #include <stdio.h> 44 #include "cpu.h" 45 41 46 42 47 #else /* !VBOX */ … … 48 53 49 54 #define QEMU_NORETURN __attribute__ ((__noreturn__)) 55 #ifdef CONFIG_GCC_ATTRIBUTE_WARN_UNUSED_RESULT 56 #define QEMU_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) 57 #else 58 #define QEMU_WARN_UNUSED_RESULT 59 #endif 60 61 #define QEMU_BUILD_BUG_ON(x) typedef char __build_bug_on__##__LINE__[(x)?-1:1]; 62 63 typedef struct QEMUTimer QEMUTimer; 64 typedef struct QEMUFile QEMUFile; 65 typedef struct QEMUBH QEMUBH; 66 typedef struct DeviceState DeviceState; 67 50 68 51 69 /* Hack around the mess dyngen-exec.h causes: We need QEMU_NORETURN in files that … … 58 76 #include <stdio.h> 59 77 #include <stdarg.h> 78 #include <stdbool.h> 60 79 #include <string.h> 61 80 #include <strings.h> … … 132 151 133 152 /* bottom halves */ 134 typedef struct QEMUBH QEMUBH;135 136 153 typedef void QEMUBHFunc(void *opaque); 137 154 … … 168 185 int qemu_fls(int i); 169 186 int qemu_fdatasync(int fd); 187 int fcntl_setfl(int fd, int flag); 170 188 171 189 /* path.c */ … … 196 214 char *qemu_strndup(const char *str, size_t size); 197 215 198 void *get_mmap_addr(unsigned long size);199 200 201 216 void qemu_mutex_lock_iothread(void); 202 217 void qemu_mutex_unlock_iothread(void); 203 218 204 219 int qemu_open(const char *name, int flags, ...); 220 ssize_t qemu_write_full(int fd, const void *buf, size_t count) 221 QEMU_WARN_UNUSED_RESULT; 205 222 void qemu_set_cloexec(int fd); 206 223 207 224 #ifndef _WIN32 225 int qemu_eventfd(int pipefd[2]); 208 226 int qemu_pipe(int pipefd[2]); 209 227 #endif … … 216 234 /* IO callbacks. */ 217 235 typedef void IOReadHandler(void *opaque, const uint8_t *buf, int size); 218 typedef int IOCanR WHandler(void *opaque);236 typedef int IOCanReadHandler(void *opaque); 219 237 typedef void IOHandler(void *opaque); 220 238 … … 243 261 typedef struct VLANState VLANState; 244 262 typedef struct VLANClientState VLANClientState; 245 typedef struct QEMUFile QEMUFile;246 263 typedef struct i2c_bus i2c_bus; 247 264 typedef struct i2c_slave i2c_slave; 248 265 typedef struct SMBusDevice SMBusDevice; 249 typedef struct QEMUTimer QEMUTimer;250 266 typedef struct PCIHostState PCIHostState; 251 267 typedef struct PCIExpressHost PCIExpressHost; … … 258 274 typedef struct uWireSlave uWireSlave; 259 275 typedef struct I2SCodec I2SCodec; 260 typedef struct DeviceState DeviceState;261 276 typedef struct SSIBus SSIBus; 277 typedef struct EventNotifier EventNotifier; 278 typedef struct VirtIODevice VirtIODevice; 279 280 typedef uint64_t pcibus_t; 281 282 void cpu_exec_init_all(unsigned long tb_size); 262 283 263 284 /* CPU save/load. */ … … 274 295 void qemu_cpu_kick(void *env); 275 296 int qemu_cpu_self(void *env); 297 298 /* work queue */ 299 struct qemu_work_item { 300 struct qemu_work_item *next; 301 void (*func)(void *data); 302 void *data; 303 int done; 304 }; 276 305 277 306 #ifdef CONFIG_USER_ONLY -
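qemu-common.h gains QEMU_BUILD_BUG_ON(), a compile-time assertion based on the negative-array-size trick: a false condition turns into a typedef of a char array with size -1, which no compiler accepts. A self-contained sketch of the same idea follows; the BUILD_BUG_ON* names are local to this sketch, and the extra expansion level is the usual way to get a distinct typedef name per use (pasting __LINE__ directly, as the header's one-liner does, pastes the literal token rather than the line number).

    #include <stdint.h>

    /* If x is non-zero the array size becomes -1 and compilation fails. */
    #define BUILD_BUG_ON_IMPL(x, line) \
        typedef char build_bug_on_##line[(x) ? -1 : 1]
    #define BUILD_BUG_ON_LINE(x, line) BUILD_BUG_ON_IMPL(x, line)
    #define BUILD_BUG_ON(x)            BUILD_BUG_ON_LINE(x, __LINE__)

    BUILD_BUG_ON(sizeof(uint32_t) != 4);     /* passes: array of size 1 */
    /* BUILD_BUG_ON(sizeof(uint32_t) != 8);     would break the build   */

    int main(void) { return 0; }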
trunk/src/recompiler/qemu-log.h
r37675 r37689 25 25 */ 26 26 #define qemu_loglevel_mask(b) ((loglevel & (b)) != 0) 27 28 27 29 28 -
trunk/src/recompiler/softmmu_exec.h
r36175 r37689 101 101 #endif /* (NB_MMU_MODES >= 5) */ 102 102 103 #if (NB_MMU_MODES > 5) 104 #error "NB_MMU_MODES > 5 is not supported for now" 105 #endif /* (NB_MMU_MODES > 5) */ 103 #if (NB_MMU_MODES >= 6) 104 105 #define ACCESS_TYPE 5 106 #define MEMSUFFIX MMU_MODE5_SUFFIX 107 #define DATA_SIZE 1 108 #include "softmmu_header.h" 109 110 #define DATA_SIZE 2 111 #include "softmmu_header.h" 112 113 #define DATA_SIZE 4 114 #include "softmmu_header.h" 115 116 #define DATA_SIZE 8 117 #include "softmmu_header.h" 118 #undef ACCESS_TYPE 119 #undef MEMSUFFIX 120 #endif /* (NB_MMU_MODES >= 6) */ 121 122 #if (NB_MMU_MODES > 6) 123 #error "NB_MMU_MODES > 6 is not supported for now" 124 #endif /* (NB_MMU_MODES > 6) */ 106 125 107 126 /* these access are slower, they must be as rare as possible */ -
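softmmu_exec.h now stamps out a sixth family of accessors for NB_MMU_MODES >= 6 by re-including softmmu_header.h with fresh ACCESS_TYPE/MEMSUFFIX/DATA_SIZE settings, the multiple-inclusion template pattern QEMU uses throughout the softmmu code. The real generated bodies are far too large for a macro, but a single-file stand-in for the pattern (GEN_LOAD and the _sketch names are inventions of this example) looks like this:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* One generator, instantiated once per access width, mirroring how the
     * header is included once per DATA_SIZE. */
    #define GEN_LOAD(NAME, TYPE)                  \
        static TYPE NAME(const void *hostaddr)    \
        {                                         \
            TYPE v;                               \
            memcpy(&v, hostaddr, sizeof v);       \
            return v;                             \
        }

    GEN_LOAD(ldub_sketch, uint8_t)    /* DATA_SIZE 1 */
    GEN_LOAD(lduw_sketch, uint16_t)   /* DATA_SIZE 2 */
    GEN_LOAD(ldl_sketch,  uint32_t)   /* DATA_SIZE 4 */
    GEN_LOAD(ldq_sketch,  uint64_t)   /* DATA_SIZE 8 */

    int main(void)
    {
        uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
        printf("%02x %04x %08x %016llx\n",
               ldub_sketch(buf), lduw_sketch(buf), ldl_sketch(buf),
               (unsigned long long)ldq_sketch(buf));
        return 0;
    }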
trunk/src/recompiler/softmmu_header.h
r36175 r37689 71 71 #define RES_TYPE uint64_t 72 72 #else 73 #define RES_TYPE int73 #define RES_TYPE uint32_t 74 74 #endif 75 75 -
trunk/src/recompiler/softmmu_template.h
r37675 r37689 26 26 * of the LGPL is applied is otherwise unspecified. 27 27 */ 28 #include "qemu-timer.h" 28 29 29 30 #define DATA_SIZE (1 << SHIFT) … … 117 118 int index; 118 119 target_ulong tlb_addr; 119 target_phys_addr_t addend; 120 target_phys_addr_t ioaddr; 121 unsigned long addend; 120 122 void *retaddr; 121 123 … … 131 133 goto do_unaligned_access; 132 134 retaddr = GETPC(); 133 addend= env->iotlb[mmu_idx][index];134 res = glue(io_read, SUFFIX)( addend, addr, retaddr);135 ioaddr = env->iotlb[mmu_idx][index]; 136 res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr); 135 137 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 136 138 /* slow unaligned access (it spans two pages or IO) */ … … 173 175 DATA_TYPE res, res1, res2; 174 176 int index, shift; 175 target_phys_addr_t addend; 177 target_phys_addr_t ioaddr; 178 unsigned long addend; 176 179 target_ulong tlb_addr, addr1, addr2; 177 180 … … 184 187 if ((addr & (DATA_SIZE - 1)) != 0) 185 188 goto do_unaligned_access; 186 retaddr = GETPC(); 187 addend = env->iotlb[mmu_idx][index]; 188 res = glue(io_read, SUFFIX)(addend, addr, retaddr); 189 ioaddr = env->iotlb[mmu_idx][index]; 190 res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr); 189 191 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 190 192 do_unaligned_access: … … 255 257 int mmu_idx) 256 258 { 257 target_phys_addr_t addend; 259 target_phys_addr_t ioaddr; 260 unsigned long addend; 258 261 target_ulong tlb_addr; 259 262 void *retaddr; … … 269 272 goto do_unaligned_access; 270 273 retaddr = GETPC(); 271 addend= env->iotlb[mmu_idx][index];272 glue(io_write, SUFFIX)( addend, val, addr, retaddr);274 ioaddr = env->iotlb[mmu_idx][index]; 275 glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr); 273 276 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 274 277 do_unaligned_access: … … 308 311 void *retaddr) 309 312 { 310 target_phys_addr_t addend; 313 target_phys_addr_t ioaddr; 314 unsigned long addend; 311 315 target_ulong tlb_addr; 312 316 int index, i; … … 320 324 if ((addr & (DATA_SIZE - 1)) != 0) 321 325 goto do_unaligned_access; 322 addend= env->iotlb[mmu_idx][index];323 glue(io_write, SUFFIX)( addend, val, addr, retaddr);326 ioaddr = env->iotlb[mmu_idx][index]; 327 glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr); 324 328 } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) { 325 329 do_unaligned_access: -
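The softmmu_template.h hunks keep the slow path that handles an access spanning two pages: it is split into two naturally aligned loads whose results are recombined with shifts. A little-endian model of that recombination (the function names and the flat mem[] array stand in for the real TLB and page machinery; shift is non-zero here because an aligned access never reaches this path):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the aligned fast path. */
    static uint32_t load_aligned32(const uint8_t *mem, uint32_t addr)
    {
        return (uint32_t)mem[addr] | ((uint32_t)mem[addr + 1] << 8) |
               ((uint32_t)mem[addr + 2] << 16) | ((uint32_t)mem[addr + 3] << 24);
    }

    /* Slow path: two aligned loads, then shift the pieces into place. */
    static uint32_t load_unaligned32(const uint8_t *mem, uint32_t addr)
    {
        uint32_t addr1 = addr & ~3u;          /* first aligned word   */
        uint32_t addr2 = addr1 + 4;           /* second aligned word  */
        unsigned shift = (addr & 3u) * 8;     /* non-zero by contract */

        uint32_t res1 = load_aligned32(mem, addr1);
        uint32_t res2 = load_aligned32(mem, addr2);
        return (res1 >> shift) | (res2 << (32 - shift));
    }

    int main(void)
    {
        uint8_t mem[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
        /* bytes 33 44 55 66 read from offset 3 -> 0x66554433 */
        printf("%#x\n", load_unaligned32(mem, 3));
        return 0;
    }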
trunk/src/recompiler/target-i386/cpu.h
r37675 r37689 58 58 #include "softfloat.h" 59 59 60 #if defined(VBOX)60 #ifdef VBOX 61 61 # include <iprt/critsect.h> 62 62 # include <iprt/thread.h> … … 758 758 uint32_t sipi_vector; 759 759 760 uint32_t cpuid_kvm_features; 761 760 762 /* in order to simplify APIC support, we leave this pointer to the 761 763 user */ 762 struct APICState *apic_state;764 struct DeviceState *apic_state; 763 765 764 766 uint64 mcg_cap; … … 773 775 uint16_t fptag_vmstate; 774 776 uint16_t fpregs_format_vmstate; 777 778 uint64_t xstate_bv; 779 XMMReg ymmh_regs[CPU_NB_REGS]; 780 781 uint64_t xcr0; 775 782 #else /* VBOX */ 776 783 … … 783 790 /** Profiling tb_flush. */ 784 791 STAMPROFILE StatTbFlush; 792 793 /** Addends for HVA -> GPA translations. */ 794 target_phys_addr_t phys_addends[NB_MMU_MODES][CPU_TLB_SIZE]; 785 795 #endif /* VBOX */ 786 796 } CPUX86State; … … 906 916 int cpu_x86_exec(CPUX86State *s); 907 917 void cpu_x86_close(CPUX86State *s); 908 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, 909 ...)); 918 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...), 919 const char *optarg); 920 void x86_cpudef_setup(void); 921 910 922 int cpu_get_pic_interrupt(CPUX86State *s); 911 923 /* MSDOS compatibility mode FPU exception support */ … … 974 986 } 975 987 988 static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env, 989 int sipi_vector) 990 { 991 env->eip = 0; 992 cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, 993 sipi_vector << 12, 994 env->segs[R_CS].limit, 995 env->segs[R_CS].flags); 996 env->halted = 0; 997 } 998 976 999 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 977 1000 target_ulong *base, unsigned int *limit, … … 1006 1029 void *puc); 1007 1030 1031 /* cpuid.c */ 1032 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 1033 uint32_t *eax, uint32_t *ebx, 1034 uint32_t *ecx, uint32_t *edx); 1035 int cpu_x86_register (CPUX86State *env, const char *cpu_model); 1036 void cpu_clear_apic_feature(CPUX86State *env); 1037 1008 1038 /* helper.c */ 1009 1039 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, … … 1011 1041 #define cpu_handle_mmu_fault cpu_x86_handle_mmu_fault 1012 1042 void cpu_x86_set_a20(CPUX86State *env, int a20_state); 1013 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,1014 uint32_t *eax, uint32_t *ebx,1015 uint32_t *ecx, uint32_t *edx);1016 1043 1017 1044 static inline int hw_breakpoint_enabled(unsigned long dr7, int index) … … 1039 1066 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); 1040 1067 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); 1041 1042 /* hw/apic.c */1043 void cpu_set_apic_base(CPUX86State *env, uint64_t val);1044 uint64_t cpu_get_apic_base(CPUX86State *env);1045 void cpu_set_apic_tpr(CPUX86State *env, uint8_t val);1046 #ifndef NO_CPU_IO_DEFS1047 uint8_t cpu_get_apic_tpr(CPUX86State *env);1048 #endif1049 1068 1050 1069 /* hw/pc.c */ … … 1078 1097 #define TARGET_PAGE_BITS 12 1079 1098 1099 #ifdef TARGET_X86_64 1100 #define TARGET_PHYS_ADDR_SPACE_BITS 52 1101 /* ??? This is really 48 bits, sign-extended, but the only thing 1102 accessible to userland with bit 48 set is the VSYSCALL, and that 1103 is handled via other mechanisms. 
*/ 1104 #define TARGET_VIRT_ADDR_SPACE_BITS 47 1105 #else 1106 #define TARGET_PHYS_ADDR_SPACE_BITS 36 1107 #define TARGET_VIRT_ADDR_SPACE_BITS 32 1108 #endif 1109 1080 1110 #define cpu_init cpu_x86_init 1081 1111 #define cpu_exec cpu_x86_exec 1082 1112 #define cpu_gen_code cpu_x86_gen_code 1083 1113 #define cpu_signal_handler cpu_x86_signal_handler 1084 #define cpu_list x86_cpu_list 1085 1086 #define CPU_SAVE_VERSION 11 1114 #define cpu_list_id x86_cpu_list 1115 #define cpudef_setup x86_cpudef_setup 1116 1117 #define CPU_SAVE_VERSION 12 1087 1118 1088 1119 /* MMU modes definitions */ … … 1113 1144 1114 1145 #include "cpu-all.h" 1115 #include "exec-all.h"1116 1117 1146 #include "svm.h" 1118 1147 1119 static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb) 1120 { 1121 env->eip = tb->pc - tb->cs_base; 1122 } 1148 #ifndef VBOX 1149 #if !defined(CONFIG_USER_ONLY) 1150 #include "hw/apic.h" 1151 #endif 1152 #else /* VBOX */ 1153 extern void cpu_set_apic_tpr(CPUX86State *env, uint8_t val); 1154 extern uint8_t cpu_get_apic_tpr(CPUX86State *env); 1155 extern uint64_t cpu_get_apic_base(CPUX86State *env); 1156 #endif /* VBOX */ 1123 1157 1124 1158 static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc, -
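Among the target-i386/cpu.h additions is cpu_x86_load_seg_cache_sipi(), which encodes how an application processor comes out of a startup IPI: CS selector = vector << 8, CS base = vector << 12, EIP = 0, so real-mode execution starts at physical address vector * 4 KiB. A tiny worked example of that arithmetic (the vector value is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned sipi_vector = 0x9A;               /* example SIPI vector     */
        uint32_t cs_selector = sipi_vector << 8;   /* 0x9A00                  */
        uint32_t cs_base     = sipi_vector << 12;  /* 0x9A000 = selector * 16 */
        uint32_t eip         = 0;

        printf("CS=%04x base=%05x EIP=%04x -> first fetch at %#x\n",
               cs_selector, cs_base, eip, cs_base + eip);
        return 0;
    }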
trunk/src/recompiler/target-i386/exec.h
r37675 r37689 84 84 void QEMU_NORETURN raise_exception_err(int exception_index, int error_code); 85 85 void QEMU_NORETURN raise_exception(int exception_index); 86 void QEMU_NORETURN raise_exception_env(int exception_index, CPUState *nenv); 86 87 void do_smm_enter(void); 87 88 … … 301 302 } 302 303 303 static inline void env_to_regs(void)304 {305 #ifdef reg_EAX306 EAX = env->regs[R_EAX];307 #endif308 #ifdef reg_ECX309 ECX = env->regs[R_ECX];310 #endif311 #ifdef reg_EDX312 EDX = env->regs[R_EDX];313 #endif314 #ifdef reg_EBX315 EBX = env->regs[R_EBX];316 #endif317 #ifdef reg_ESP318 ESP = env->regs[R_ESP];319 #endif320 #ifdef reg_EBP321 EBP = env->regs[R_EBP];322 #endif323 #ifdef reg_ESI324 ESI = env->regs[R_ESI];325 #endif326 #ifdef reg_EDI327 EDI = env->regs[R_EDI];328 #endif329 }330 331 static inline void regs_to_env(void)332 {333 #ifdef reg_EAX334 env->regs[R_EAX] = EAX;335 #endif336 #ifdef reg_ECX337 env->regs[R_ECX] = ECX;338 #endif339 #ifdef reg_EDX340 env->regs[R_EDX] = EDX;341 #endif342 #ifdef reg_EBX343 env->regs[R_EBX] = EBX;344 #endif345 #ifdef reg_ESP346 env->regs[R_ESP] = ESP;347 #endif348 #ifdef reg_EBP349 env->regs[R_EBP] = EBP;350 #endif351 #ifdef reg_ESI352 env->regs[R_ESI] = ESI;353 #endif354 #ifdef reg_EDI355 env->regs[R_EDI] = EDI;356 #endif357 }358 359 304 static inline int cpu_has_work(CPUState *env) 360 305 { … … 393 338 env->hflags |= HF_SVME_MASK; 394 339 } 340 341 static inline void cpu_pc_from_tb(CPUState *env, TranslationBlock *tb) 342 { 343 env->eip = tb->pc - tb->cs_base; 344 } 345 -
trunk/src/recompiler/target-i386/helper.c
r37675 r37689 43 43 //#define DEBUG_MMU 44 44 45 #ifndef VBOX46 /* feature flags taken from "Intel Processor Identification and the CPUID47 * Instruction" and AMD's "CPUID Specification". In cases of disagreement48 * about feature names, the Linux name is used. */49 static const char *feature_name[] = {50 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",51 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",52 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",53 "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",54 };55 static const char *ext_feature_name[] = {56 "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",57 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,58 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",59 NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",60 };61 static const char *ext2_feature_name[] = {62 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",63 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",64 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",65 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",66 };67 static const char *ext3_feature_name[] = {68 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",69 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,70 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,71 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,72 };73 74 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,75 uint32_t *ext_features,76 uint32_t *ext2_features,77 uint32_t *ext3_features)78 {79 int i;80 int found = 0;81 82 for ( i = 0 ; i < 32 ; i++ )83 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {84 *features |= 1 << i;85 found = 1;86 }87 for ( i = 0 ; i < 32 ; i++ )88 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {89 *ext_features |= 1 << i;90 found = 1;91 }92 for ( i = 0 ; i < 32 ; i++ )93 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {94 *ext2_features |= 1 << i;95 found = 1;96 }97 for ( i = 0 ; i < 32 ; i++ )98 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {99 *ext3_features |= 1 << i;100 found = 1;101 }102 if (!found) {103 fprintf(stderr, "CPU feature %s not found\n", flagname);104 }105 }106 #endif /* !VBOX */107 108 typedef struct x86_def_t {109 const char *name;110 uint32_t level;111 uint32_t vendor1, vendor2, vendor3;112 int family;113 int model;114 int stepping;115 uint32_t features, ext_features, ext2_features, ext3_features;116 uint32_t xlevel;117 char model_id[48];118 int vendor_override;119 } x86_def_t;120 121 #ifndef VBOX122 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)123 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \124 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)125 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \126 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \127 CPUID_PSE36 | CPUID_FXSR)128 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)129 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \130 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \131 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | 
CPUID_SSE2 | \132 CPUID_PAE | CPUID_SEP | CPUID_APIC)133 static x86_def_t x86_defs[] = {134 #ifdef TARGET_X86_64135 {136 .name = "qemu64",137 .level = 4,138 .vendor1 = CPUID_VENDOR_AMD_1,139 .vendor2 = CPUID_VENDOR_AMD_2,140 .vendor3 = CPUID_VENDOR_AMD_3,141 .family = 6,142 .model = 2,143 .stepping = 3,144 .features = PPRO_FEATURES |145 /* these features are needed for Win64 and aren't fully implemented */146 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |147 /* this feature is needed for Solaris and isn't fully implemented */148 CPUID_PSE36,149 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,150 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |151 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,152 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |153 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,154 .xlevel = 0x8000000A,155 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,156 },157 {158 .name = "phenom",159 .level = 5,160 .vendor1 = CPUID_VENDOR_AMD_1,161 .vendor2 = CPUID_VENDOR_AMD_2,162 .vendor3 = CPUID_VENDOR_AMD_3,163 .family = 16,164 .model = 2,165 .stepping = 3,166 /* Missing: CPUID_VME, CPUID_HT */167 .features = PPRO_FEATURES |168 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |169 CPUID_PSE36,170 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |171 CPUID_EXT_POPCNT,172 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */173 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |174 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |175 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |176 CPUID_EXT2_FFXSR,177 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,178 CPUID_EXT3_CR8LEG,179 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,180 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */181 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |182 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,183 .xlevel = 0x8000001A,184 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"185 },186 {187 .name = "core2duo",188 .level = 10,189 .family = 6,190 .model = 15,191 .stepping = 11,192 /* The original CPU also implements these features:193 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,194 CPUID_TM, CPUID_PBE */195 .features = PPRO_FEATURES |196 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |197 CPUID_PSE36,198 /* The original CPU also implements these ext features:199 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,200 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */201 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,202 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,203 .ext3_features = CPUID_EXT3_LAHF_LM,204 .xlevel = 0x80000008,205 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",206 },207 {208 .name = "kvm64",209 .level = 5,210 .vendor1 = CPUID_VENDOR_INTEL_1,211 .vendor2 = CPUID_VENDOR_INTEL_2,212 .vendor3 = CPUID_VENDOR_INTEL_3,213 .family = 15,214 .model = 6,215 .stepping = 1,216 /* Missing: CPUID_VME, CPUID_HT */217 .features = PPRO_FEATURES |218 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |219 CPUID_PSE36,220 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */221 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,222 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */223 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |224 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,225 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,226 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,227 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,228 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */229 
.ext3_features = 0,230 .xlevel = 0x80000008,231 .model_id = "Common KVM processor"232 },233 #endif234 {235 .name = "qemu32",236 .level = 4,237 .family = 6,238 .model = 3,239 .stepping = 3,240 .features = PPRO_FEATURES,241 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,242 .xlevel = 0,243 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,244 },245 {246 .name = "coreduo",247 .level = 10,248 .family = 6,249 .model = 14,250 .stepping = 8,251 /* The original CPU also implements these features:252 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,253 CPUID_TM, CPUID_PBE */254 .features = PPRO_FEATURES | CPUID_VME |255 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,256 /* The original CPU also implements these ext features:257 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,258 CPUID_EXT_PDCM */259 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,260 .ext2_features = CPUID_EXT2_NX,261 .xlevel = 0x80000008,262 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",263 },264 {265 .name = "486",266 .level = 0,267 .family = 4,268 .model = 0,269 .stepping = 0,270 .features = I486_FEATURES,271 .xlevel = 0,272 },273 {274 .name = "pentium",275 .level = 1,276 .family = 5,277 .model = 4,278 .stepping = 3,279 .features = PENTIUM_FEATURES,280 .xlevel = 0,281 },282 {283 .name = "pentium2",284 .level = 2,285 .family = 6,286 .model = 5,287 .stepping = 2,288 .features = PENTIUM2_FEATURES,289 .xlevel = 0,290 },291 {292 .name = "pentium3",293 .level = 2,294 .family = 6,295 .model = 7,296 .stepping = 3,297 .features = PENTIUM3_FEATURES,298 .xlevel = 0,299 },300 {301 .name = "athlon",302 .level = 2,303 .vendor1 = CPUID_VENDOR_AMD_1,304 .vendor2 = CPUID_VENDOR_AMD_2,305 .vendor3 = CPUID_VENDOR_AMD_3,306 .family = 6,307 .model = 2,308 .stepping = 3,309 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,310 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,311 .xlevel = 0x80000008,312 /* XXX: put another string ? 
*/313 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,314 },315 {316 .name = "n270",317 /* original is on level 10 */318 .level = 5,319 .family = 6,320 .model = 28,321 .stepping = 2,322 .features = PPRO_FEATURES |323 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,324 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |325 * CPUID_HT | CPUID_TM | CPUID_PBE */326 /* Some CPUs got no CPUID_SEP */327 .ext_features = CPUID_EXT_MONITOR |328 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,329 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |330 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */331 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,332 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */333 .xlevel = 0x8000000A,334 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",335 },336 };337 338 static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,339 uint32_t *ebx, uint32_t *ecx, uint32_t *edx);340 341 static int cpu_x86_fill_model_id(char *str)342 {343 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;344 int i;345 346 for (i = 0; i < 3; i++) {347 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);348 memcpy(str + i * 16 + 0, &eax, 4);349 memcpy(str + i * 16 + 4, &ebx, 4);350 memcpy(str + i * 16 + 8, &ecx, 4);351 memcpy(str + i * 16 + 12, &edx, 4);352 }353 return 0;354 }355 356 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)357 {358 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;359 360 x86_cpu_def->name = "host";361 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);362 x86_cpu_def->level = eax;363 x86_cpu_def->vendor1 = ebx;364 x86_cpu_def->vendor2 = edx;365 x86_cpu_def->vendor3 = ecx;366 367 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);368 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);369 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);370 x86_cpu_def->stepping = eax & 0x0F;371 x86_cpu_def->ext_features = ecx;372 x86_cpu_def->features = edx;373 374 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);375 x86_cpu_def->xlevel = eax;376 377 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);378 x86_cpu_def->ext2_features = edx;379 x86_cpu_def->ext3_features = ecx;380 cpu_x86_fill_model_id(x86_cpu_def->model_id);381 x86_cpu_def->vendor_override = 0;382 383 return 0;384 }385 386 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)387 {388 unsigned int i;389 x86_def_t *def;390 391 char *s = strdup(cpu_model);392 char *featurestr, *name = strtok(s, ",");393 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;394 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;395 uint32_t numvalue;396 397 def = NULL;398 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {399 if (strcmp(name, x86_defs[i].name) == 0) {400 def = &x86_defs[i];401 break;402 }403 }404 if (kvm_enabled() && strcmp(name, "host") == 0) {405 cpu_x86_fill_host(x86_cpu_def);406 } else if (!def) {407 goto error;408 } else {409 memcpy(x86_cpu_def, def, sizeof(*def));410 }411 412 add_flagname_to_bitmaps("hypervisor", &plus_features,413 &plus_ext_features, &plus_ext2_features, &plus_ext3_features);414 415 featurestr = strtok(NULL, ",");416 417 while (featurestr) {418 char *val;419 if (featurestr[0] == '+') {420 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);421 } else if (featurestr[0] == '-') {422 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);423 } 
else if ((val = strchr(featurestr, '='))) {424 *val = 0; val++;425 if (!strcmp(featurestr, "family")) {426 char *err;427 numvalue = strtoul(val, &err, 0);428 if (!*val || *err) {429 fprintf(stderr, "bad numerical value %s\n", val);430 goto error;431 }432 x86_cpu_def->family = numvalue;433 } else if (!strcmp(featurestr, "model")) {434 char *err;435 numvalue = strtoul(val, &err, 0);436 if (!*val || *err || numvalue > 0xff) {437 fprintf(stderr, "bad numerical value %s\n", val);438 goto error;439 }440 x86_cpu_def->model = numvalue;441 } else if (!strcmp(featurestr, "stepping")) {442 char *err;443 numvalue = strtoul(val, &err, 0);444 if (!*val || *err || numvalue > 0xf) {445 fprintf(stderr, "bad numerical value %s\n", val);446 goto error;447 }448 x86_cpu_def->stepping = numvalue ;449 } else if (!strcmp(featurestr, "level")) {450 char *err;451 numvalue = strtoul(val, &err, 0);452 if (!*val || *err) {453 fprintf(stderr, "bad numerical value %s\n", val);454 goto error;455 }456 x86_cpu_def->level = numvalue;457 } else if (!strcmp(featurestr, "xlevel")) {458 char *err;459 numvalue = strtoul(val, &err, 0);460 if (!*val || *err) {461 fprintf(stderr, "bad numerical value %s\n", val);462 goto error;463 }464 if (numvalue < 0x80000000) {465 numvalue += 0x80000000;466 }467 x86_cpu_def->xlevel = numvalue;468 } else if (!strcmp(featurestr, "vendor")) {469 if (strlen(val) != 12) {470 fprintf(stderr, "vendor string must be 12 chars long\n");471 goto error;472 }473 x86_cpu_def->vendor1 = 0;474 x86_cpu_def->vendor2 = 0;475 x86_cpu_def->vendor3 = 0;476 for(i = 0; i < 4; i++) {477 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);478 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);479 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);480 }481 x86_cpu_def->vendor_override = 1;482 } else if (!strcmp(featurestr, "model_id")) {483 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),484 val);485 } else {486 fprintf(stderr, "unrecognized feature %s\n", featurestr);487 goto error;488 }489 } else {490 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);491 goto error;492 }493 featurestr = strtok(NULL, ",");494 }495 x86_cpu_def->features |= plus_features;496 x86_cpu_def->ext_features |= plus_ext_features;497 x86_cpu_def->ext2_features |= plus_ext2_features;498 x86_cpu_def->ext3_features |= plus_ext3_features;499 x86_cpu_def->features &= ~minus_features;500 x86_cpu_def->ext_features &= ~minus_ext_features;501 x86_cpu_def->ext2_features &= ~minus_ext2_features;502 x86_cpu_def->ext3_features &= ~minus_ext3_features;503 free(s);504 return 0;505 506 error:507 free(s);508 return -1;509 }510 511 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))512 {513 unsigned int i;514 515 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)516 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);517 }518 #endif /* !VBOX */519 520 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)521 {522 #ifndef VBOX523 x86_def_t def1, *def = &def1;524 525 if (cpu_x86_find_by_name(def, cpu_model) < 0)526 return -1;527 if (def->vendor1) {528 env->cpuid_vendor1 = def->vendor1;529 env->cpuid_vendor2 = def->vendor2;530 env->cpuid_vendor3 = def->vendor3;531 } else {532 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;533 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;534 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;535 }536 env->cpuid_vendor_override = def->vendor_override;537 env->cpuid_level = def->level;538 if (def->family > 0x0f)539 env->cpuid_version = 0xf00 | 
((def->family - 0x0f) << 20);540 else541 env->cpuid_version = def->family << 8;542 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);543 env->cpuid_version |= def->stepping;544 env->cpuid_features = def->features;545 env->pat = 0x0007040600070406ULL;546 env->cpuid_ext_features = def->ext_features;547 env->cpuid_ext2_features = def->ext2_features;548 env->cpuid_xlevel = def->xlevel;549 env->cpuid_ext3_features = def->ext3_features;550 {551 const char *model_id = def->model_id;552 int c, len, i;553 if (!model_id)554 model_id = "";555 len = strlen(model_id);556 for(i = 0; i < 48; i++) {557 if (i >= len)558 c = '\0';559 else560 c = (uint8_t)model_id[i];561 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));562 }563 }564 #endif /* !VBOX */565 return 0;566 }567 568 45 /* NOTE: must be called outside the CPU execute loop */ 569 46 void cpu_reset(CPUX86State *env) … … 620 97 621 98 env->eip = 0xfff0; 622 #ifndef VBOX 99 #ifndef VBOX /* We'll get the right value from CPUM. */ 623 100 env->regs[R_EDX] = env->cpuid_version; 624 #else625 /** @todo: is it right? */626 env->regs[R_EDX] = 0x600; /* indicate P6 processor */627 101 #endif 628 102 … … 762 236 } 763 237 }; 764 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0] 765 [(sc->flags & DESC_TYPE_MASK) 766 >> DESC_TYPE_SHIFT]); 238 cpu_fprintf(f, "%s", 239 sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0] 240 [(sc->flags & DESC_TYPE_MASK) 241 >> DESC_TYPE_SHIFT]); 767 242 } 768 243 done: … … 909 384 } 910 385 } 386 cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer); 911 387 if (flags & X86_DUMP_FPU) { 912 388 int fptag; … … 1026 502 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)); 1027 503 #ifdef VBOX 1028 1029 504 remR3ChangeCpuMode(env); 1030 505 #endif … … 1081 556 } 1082 557 1083 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)1084 {1085 return addr;1086 }1087 1088 558 #else 1089 559 … … 1100 570 0 = nothing more to do 1101 571 1 = generate PF fault 1102 2 = soft MMU activation required for this block1103 572 */ 1104 573 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, … … 1107 576 uint64_t ptep, pte; 1108 577 target_ulong pde_addr, pte_addr; 1109 int error_code, is_dirty, prot, page_size, ret,is_write, is_user;578 int error_code, is_dirty, prot, page_size, is_write, is_user; 1110 579 target_phys_addr_t paddr; 1111 580 uint32_t page_offset; … … 1368 837 vaddr = virt_addr + page_offset; 1369 838 1370 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);1371 return ret;839 tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size); 840 return 0; 1372 841 do_fault_protect: 1373 842 error_code = PG_ERROR_P_MASK; … … 1557 1026 static CPUDebugExcpHandler *prev_debug_excp_handler; 1558 1027 1559 void raise_exception (int exception_index);1028 void raise_exception_env(int exception_index, CPUState *env); 1560 1029 1561 1030 static void breakpoint_handler(CPUState *env) … … 1567 1036 env->watchpoint_hit = NULL; 1568 1037 if (check_hw_breakpoints(env, 0)) 1569 raise_exception (EXCP01_DB);1038 raise_exception_env(EXCP01_DB, env); 1570 1039 else 1571 1040 cpu_resume_from_signal(env, NULL); … … 1576 1045 if (bp->flags & BP_CPU) { 1577 1046 check_hw_breakpoints(env, 1); 1578 raise_exception (EXCP01_DB);1047 raise_exception_env(EXCP01_DB, env); 1579 1048 } 1580 1049 break; … … 1584 1053 prev_debug_excp_handler(env); 1585 1054 } 1586 1587 1055 1588 1056 #ifndef VBOX … … 1659 1127 } 1660 1128 1661 static void host_cpuid(uint32_t function, 
uint32_t count,1662 uint32_t *eax, uint32_t *ebx,1663 uint32_t *ecx, uint32_t *edx)1664 {1665 #if defined(CONFIG_KVM)1666 uint32_t vec[4];1667 1668 #ifdef __x86_64__1669 asm volatile("cpuid"1670 : "=a"(vec[0]), "=b"(vec[1]),1671 "=c"(vec[2]), "=d"(vec[3])1672 : "0"(function), "c"(count) : "cc");1673 #else1674 asm volatile("pusha \n\t"1675 "cpuid \n\t"1676 "mov %%eax, 0(%2) \n\t"1677 "mov %%ebx, 4(%2) \n\t"1678 "mov %%ecx, 8(%2) \n\t"1679 "mov %%edx, 12(%2) \n\t"1680 "popa"1681 : : "a"(function), "c"(count), "S"(vec)1682 : "memory", "cc");1683 #endif1684 1685 if (eax)1686 *eax = vec[0];1687 if (ebx)1688 *ebx = vec[1];1689 if (ecx)1690 *ecx = vec[2];1691 if (edx)1692 *edx = vec[3];1693 #endif1694 }1695 1696 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,1697 uint32_t *ecx, uint32_t *edx)1698 {1699 *ebx = env->cpuid_vendor1;1700 *edx = env->cpuid_vendor2;1701 *ecx = env->cpuid_vendor3;1702 1703 /* sysenter isn't supported on compatibility mode on AMD, syscall1704 * isn't supported in compatibility mode on Intel.1705 * Normally we advertise the actual cpu vendor, but you can override1706 * this if you want to use KVM's sysenter/syscall emulation1707 * in compatibility mode and when doing cross vendor migration1708 */1709 if (kvm_enabled() && env->cpuid_vendor_override) {1710 host_cpuid(0, 0, NULL, ebx, ecx, edx);1711 }1712 }1713 1714 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,1715 uint32_t *eax, uint32_t *ebx,1716 uint32_t *ecx, uint32_t *edx)1717 {1718 /* test if maximum index reached */1719 if (index & 0x80000000) {1720 if (index > env->cpuid_xlevel)1721 index = env->cpuid_level;1722 } else {1723 if (index > env->cpuid_level)1724 index = env->cpuid_level;1725 }1726 1727 switch(index) {1728 case 0:1729 *eax = env->cpuid_level;1730 get_cpuid_vendor(env, ebx, ecx, edx);1731 break;1732 case 1:1733 *eax = env->cpuid_version;1734 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. 
*/1735 *ecx = env->cpuid_ext_features;1736 *edx = env->cpuid_features;1737 if (env->nr_cores * env->nr_threads > 1) {1738 *ebx |= (env->nr_cores * env->nr_threads) << 16;1739 *edx |= 1 << 28; /* HTT bit */1740 }1741 break;1742 case 2:1743 /* cache info: needed for Pentium Pro compatibility */1744 *eax = 1;1745 *ebx = 0;1746 *ecx = 0;1747 *edx = 0x2c307d;1748 break;1749 case 4:1750 /* cache info: needed for Core compatibility */1751 if (env->nr_cores > 1) {1752 *eax = (env->nr_cores - 1) << 26;1753 } else {1754 *eax = 0;1755 }1756 switch (count) {1757 case 0: /* L1 dcache info */1758 *eax |= 0x0000121;1759 *ebx = 0x1c0003f;1760 *ecx = 0x000003f;1761 *edx = 0x0000001;1762 break;1763 case 1: /* L1 icache info */1764 *eax |= 0x0000122;1765 *ebx = 0x1c0003f;1766 *ecx = 0x000003f;1767 *edx = 0x0000001;1768 break;1769 case 2: /* L2 cache info */1770 *eax |= 0x0000143;1771 if (env->nr_threads > 1) {1772 *eax |= (env->nr_threads - 1) << 14;1773 }1774 *ebx = 0x3c0003f;1775 *ecx = 0x0000fff;1776 *edx = 0x0000001;1777 break;1778 default: /* end of info */1779 *eax = 0;1780 *ebx = 0;1781 *ecx = 0;1782 *edx = 0;1783 break;1784 }1785 break;1786 case 5:1787 /* mwait info: needed for Core compatibility */1788 *eax = 0; /* Smallest monitor-line size in bytes */1789 *ebx = 0; /* Largest monitor-line size in bytes */1790 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;1791 *edx = 0;1792 break;1793 case 6:1794 /* Thermal and Power Leaf */1795 *eax = 0;1796 *ebx = 0;1797 *ecx = 0;1798 *edx = 0;1799 break;1800 case 9:1801 /* Direct Cache Access Information Leaf */1802 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */1803 *ebx = 0;1804 *ecx = 0;1805 *edx = 0;1806 break;1807 case 0xA:1808 /* Architectural Performance Monitoring Leaf */1809 *eax = 0;1810 *ebx = 0;1811 *ecx = 0;1812 *edx = 0;1813 break;1814 case 0x80000000:1815 *eax = env->cpuid_xlevel;1816 *ebx = env->cpuid_vendor1;1817 *edx = env->cpuid_vendor2;1818 *ecx = env->cpuid_vendor3;1819 break;1820 case 0x80000001:1821 *eax = env->cpuid_version;1822 *ebx = 0;1823 *ecx = env->cpuid_ext3_features;1824 *edx = env->cpuid_ext2_features;1825 1826 /* The Linux kernel checks for the CMPLegacy bit and1827 * discards multiple thread information if it is set.1828 * So dont set it here for Intel to make Linux guests happy.1829 */1830 if (env->nr_cores * env->nr_threads > 1) {1831 uint32_t tebx, tecx, tedx;1832 get_cpuid_vendor(env, &tebx, &tecx, &tedx);1833 if (tebx != CPUID_VENDOR_INTEL_1 ||1834 tedx != CPUID_VENDOR_INTEL_2 ||1835 tecx != CPUID_VENDOR_INTEL_3) {1836 *ecx |= 1 << 1; /* CmpLegacy bit */1837 }1838 }1839 1840 if (kvm_enabled()) {1841 /* Nested SVM not yet supported in upstream QEMU */1842 *ecx &= ~CPUID_EXT3_SVM;1843 }1844 break;1845 case 0x80000002:1846 case 0x80000003:1847 case 0x80000004:1848 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];1849 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];1850 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];1851 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];1852 break;1853 case 0x80000005:1854 /* cache info (L1 cache) */1855 *eax = 0x01ff01ff;1856 *ebx = 0x01ff01ff;1857 *ecx = 0x40020140;1858 *edx = 0x40020140;1859 break;1860 case 0x80000006:1861 /* cache info (L2 cache) */1862 *eax = 0;1863 *ebx = 0x42004200;1864 *ecx = 0x02008140;1865 *edx = 0;1866 break;1867 case 0x80000008:1868 /* virtual & phys address size in low 2 bytes. */1869 /* XXX: This value must match the one used in the MMU code. 
*/1870 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {1871 /* 64 bit processor */1872 /* XXX: The physical address space is limited to 42 bits in exec.c. */1873 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */1874 } else {1875 if (env->cpuid_features & CPUID_PSE36)1876 *eax = 0x00000024; /* 36 bits physical */1877 else1878 *eax = 0x00000020; /* 32 bits physical */1879 }1880 *ebx = 0;1881 *ecx = 0;1882 *edx = 0;1883 if (env->nr_cores * env->nr_threads > 1) {1884 *ecx |= (env->nr_cores * env->nr_threads) - 1;1885 }1886 break;1887 case 0x8000000A:1888 *eax = 0x00000001; /* SVM Revision */1889 *ebx = 0x00000010; /* nr of ASIDs */1890 *ecx = 0;1891 *edx = 0; /* optional features */1892 break;1893 default:1894 /* reserved values: zero */1895 *eax = 0;1896 *ebx = 0;1897 *ecx = 0;1898 *edx = 0;1899 break;1900 }1901 }1902 1903 1904 1129 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, 1905 1130 target_ulong *base, unsigned int *limit, … … 1959 1184 #endif 1960 1185 } 1186 #ifndef VBOX 1961 1187 if (cpu_x86_register(env, cpu_model) < 0) { 1962 1188 cpu_x86_close(env); 1963 1189 return NULL; 1964 1190 } 1965 #ifndef VBOX1966 1191 mce_init(env); 1967 1192 #endif … … 1979 1204 cpu_reset(env); 1980 1205 env->interrupt_request = sipi; 1981 apic_init_reset(env); 1206 apic_init_reset(env->apic_state); 1207 env->halted = !cpu_is_bsp(env); 1982 1208 } 1983 1209 1984 1210 void do_cpu_sipi(CPUState *env) 1985 1211 { 1986 apic_sipi(env );1212 apic_sipi(env->apic_state); 1987 1213 } 1988 1214 #else -
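Worked example (not part of the changeset): the cpu_x86_register() hunk above packs family/model/stepping into env->cpuid_version, i.e. the EAX value of CPUID leaf 1. Below is a minimal standalone sketch of that packing, using the family/model/stepping values of the "phenom" entry in x86_defs[]; the function name and the printf are only illustration.

#include <stdio.h>
#include <inttypes.h>

static uint32_t pack_cpuid_version(int family, int model, int stepping)
{
    uint32_t version;
    if (family > 0x0f)                                       /* extended family encoding */
        version = 0xf00 | ((family - 0x0f) << 20);
    else
        version = family << 8;
    version |= ((model & 0xf) << 4) | ((model >> 4) << 16);  /* model + extended model */
    version |= stepping;
    return version;
}

int main(void)
{
    /* family 16, model 2, stepping 3 ("phenom") -> EAX = 0x00100f23 */
    printf("CPUID.1:EAX = 0x%08" PRIx32 "\n", pack_cpuid_version(16, 2, 3));
    return 0;
}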
trunk/src/recompiler/target-i386/op_helper.c
r37675 r37689 27 27 */ 28 28 29 #define CPU_NO_GLOBAL_REGS30 29 #include "exec.h" 31 30 #include "exec-all.h" 32 31 #include "host-utils.h" 32 #include "ioport.h" 33 33 34 34 #ifdef VBOX … … 37 37 # include "tcg.h" 38 38 #endif /* VBOX */ 39 39 40 //#define DEBUG_PCALL 40 41 … … 370 371 raise_exception_err(EXCP0A_TSS, selector & 0xfffc); 371 372 #ifdef VBOX 372 # if 0 373 /** @todo: now we ignore loading 0 selectors, need to check what is correct once */ 373 # if 0 /** @todo now we ignore loading 0 selectors, need to check what is correct once */ 374 374 cpu_x86_load_seg_cache(env, seg_reg, selector, 375 375 0, 0, 0); … … 1760 1760 } 1761 1761 1762 void raise_exception_env(int exception_index, CPUState *nenv) 1763 { 1764 env = nenv; 1765 raise_exception(exception_index); 1766 } 1762 1767 /* SMM support */ 1763 1768 … … 3445 3450 case 8: 3446 3451 if (!(env->hflags2 & HF2_VINTR_MASK)) { 3452 #ifndef VBOX 3453 val = cpu_get_apic_tpr(env->apic_state); 3454 #else /* VBOX */ 3447 3455 val = cpu_get_apic_tpr(env); 3456 #endif /* VBOX */ 3448 3457 } else { 3449 3458 val = env->v_tpr; … … 3469 3478 case 8: 3470 3479 if (!(env->hflags2 & HF2_VINTR_MASK)) { 3480 #ifndef VBOX 3481 cpu_set_apic_tpr(env->apic_state, t0); 3482 #else /* VBOX */ 3471 3483 cpu_set_apic_tpr(env, t0); 3484 #endif /* VBOX */ 3472 3485 } 3473 3486 env->v_tpr = t0 & 0x0f; … … 3534 3547 void helper_rdtscp(void) 3535 3548 { 3549 helper_rdtsc(); 3536 3550 #ifndef VBOX 3537 helper_rdtsc();3538 3551 ECX = (uint32_t)(env->tsc_aux); 3539 #else 3552 #else /* VBOX */ 3540 3553 uint64_t val; 3541 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {3542 raise_exception(EXCP0D_GPF);3543 }3544 3545 val = cpu_get_tsc(env);3546 EAX = (uint32_t)(val);3547 EDX = (uint32_t)(val >> 32);3548 3554 if (cpu_rdmsr(env, MSR_K8_TSC_AUX, &val) == 0) 3549 3555 ECX = (uint32_t)(val); … … 3603 3609 case MSR_IA32_APICBASE: 3604 3610 # ifndef VBOX /* The CPUMSetGuestMsr call below does this now. */ 3605 cpu_set_apic_base(env , val);3611 cpu_set_apic_base(env->apic_state, val); 3606 3612 # endif 3607 3613 break; … … 3749 3755 break; 3750 3756 case MSR_IA32_APICBASE: 3757 #ifndef VBOX 3758 val = cpu_get_apic_base(env->apic_state); 3759 #else /* VBOX */ 3751 3760 val = cpu_get_apic_base(env); 3761 #endif /* VBOX */ 3752 3762 break; 3753 3763 case MSR_EFER: … … 6643 6653 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err), 6644 6654 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err))); 6655 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0); 6645 6656 6646 6657 env->hflags2 &= ~HF2_GIF_MASK; -
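Illustration (not part of the changeset): the reworked helper_rdtscp() above first runs helper_rdtsc(), which leaves the 64-bit TSC in EDX:EAX, and then loads MSR_K8_TSC_AUX into ECX. A standalone sketch of that register layout follows; the TSC and aux values are invented for the example.

#include <stdio.h>
#include <inttypes.h>

struct guest_regs { uint32_t eax, ecx, edx; };

static void emulate_rdtscp(struct guest_regs *r, uint64_t tsc, uint64_t tsc_aux)
{
    r->eax = (uint32_t)tsc;            /* low 32 bits, as helper_rdtsc() stores them */
    r->edx = (uint32_t)(tsc >> 32);    /* high 32 bits */
    r->ecx = (uint32_t)tsc_aux;        /* MSR_K8_TSC_AUX, the extra output of RDTSCP */
}

int main(void)
{
    struct guest_regs r;
    emulate_rdtscp(&r, 0x123456789abcdef0ULL, 7);
    printf("EAX=%08" PRIx32 " EDX=%08" PRIx32 " ECX=%08" PRIx32 "\n", r.eax, r.edx, r.ecx);
    return 0;
}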
trunk/src/recompiler/target-i386/ops_sse_header.h
r37675 r37689
41 41 #define dh_ctype_XMMReg XMMReg *
42 42 #define dh_ctype_MMXReg MMXReg *
43 #define dh_is_signed_Reg dh_is_signed_ptr
44 #define dh_is_signed_XMMReg dh_is_signed_ptr
45 #define dh_is_signed_MMXReg dh_is_signed_ptr
43 46
44 47 DEF_HELPER_2(glue(psrlw, SUFFIX), void, Reg, Reg)
-
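Side note (illustration only): DEF_HELPER_2(glue(psrlw, SUFFIX), ...) in the hunk above builds the helper name by preprocessor token pasting. Below is a minimal sketch of the glue()/xglue() idiom; the _xmm suffix and the helper_ prefix are assumptions about how the surrounding QEMU macros expand, used here only to show the pasting.

#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)         /* expand the arguments before pasting */

#define SUFFIX _xmm                     /* assumed example value */

void glue(helper_psrlw, SUFFIX)(void)   /* defines helper_psrlw_xmm() */
{
    printf("%s called\n", __func__);
}

int main(void)
{
    helper_psrlw_xmm();
    return 0;
}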
trunk/src/recompiler/target-i386/translate.c
r37675 r37689 85 85 static TCGv cpu_tmp5; 86 86 87 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE]; 88 87 89 #include "gen-icount.h" 88 90 … … 752 754 } 753 755 754 # if 0 /* unused code? */755 static void gen_check_external_event2()756 {757 gen_helper_check_external_event();758 }759 # endif760 761 756 #endif /* VBOX */ 762 757 … … 931 926 gen_op_set_cc_op(s->cc_op); 932 927 gen_jmp_im(cur_eip); 933 state_saved = 1;934 928 } 935 929 svm_flags |= (1 << (4 + ot)); … … 2381 2375 override = s->override; 2382 2376 must_add_seg = 1; 2383 } else {2384 override = R_DS;2385 2377 } 2386 2378 if (must_add_seg) { … … 2488 2480 2489 2481 cc_op = s->cc_op; 2490 if (s->cc_op != CC_OP_DYNAMIC) { 2491 gen_op_set_cc_op(s->cc_op); 2492 s->cc_op = CC_OP_DYNAMIC; 2493 } 2482 gen_update_cc_op(s); 2494 2483 if (s->jmp_opt) { 2495 2484 l1 = gen_new_label(); … … 2500 2489 gen_set_label(l1); 2501 2490 gen_goto_tb(s, 1, val); 2502 s->is_jmp = 3;2491 s->is_jmp = DISAS_TB_JUMP; 2503 2492 } else { 2504 2493 … … 2589 2578 interrupts for the next instruction */ 2590 2579 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) 2591 s->is_jmp = 3;2580 s->is_jmp = DISAS_TB_JUMP; 2592 2581 } else { 2593 2582 gen_op_movl_seg_T0_vm(seg_reg); 2594 2583 if (seg_reg == R_SS) 2595 s->is_jmp = 3;2584 s->is_jmp = DISAS_TB_JUMP; 2596 2585 } 2597 2586 } … … 2861 2850 gen_jmp_im(cur_eip); 2862 2851 gen_helper_raise_exception(tcg_const_i32(trapno)); 2863 s->is_jmp = 3;2852 s->is_jmp = DISAS_TB_JUMP; 2864 2853 } 2865 2854 … … 2874 2863 gen_helper_raise_interrupt(tcg_const_i32(intno), 2875 2864 tcg_const_i32(next_eip - cur_eip)); 2876 s->is_jmp = 3;2865 s->is_jmp = DISAS_TB_JUMP; 2877 2866 } 2878 2867 … … 2883 2872 gen_jmp_im(cur_eip); 2884 2873 gen_helper_debug(); 2885 s->is_jmp = 3;2874 s->is_jmp = DISAS_TB_JUMP; 2886 2875 } 2887 2876 … … 2910 2899 tcg_gen_exit_tb(0); 2911 2900 } 2912 s->is_jmp = 3;2901 s->is_jmp = DISAS_TB_JUMP; 2913 2902 } 2914 2903 … … 2918 2907 { 2919 2908 if (s->jmp_opt) { 2920 if (s->cc_op != CC_OP_DYNAMIC) { 2921 gen_op_set_cc_op(s->cc_op); 2922 s->cc_op = CC_OP_DYNAMIC; 2923 } 2909 gen_update_cc_op(s); 2924 2910 gen_goto_tb(s, tb_num, eip); 2925 s->is_jmp = 3;2911 s->is_jmp = DISAS_TB_JUMP; 2926 2912 } else { 2927 2913 gen_jmp_im(eip); … … 3666 3652 case 0x172: 3667 3653 case 0x173: 3654 if (b1 >= 2) { 3655 goto illegal_op; 3656 } 3668 3657 val = ldub_code(s->pc++); 3669 3658 if (is_xmm) { … … 3893 3882 reg = ((modrm >> 3) & 7) | rex_r; 3894 3883 mod = (modrm >> 6) & 3; 3884 if (b1 >= 2) { 3885 goto illegal_op; 3886 } 3895 3887 3896 3888 sse_op2 = sse_op_table6[b].op[b1]; … … 3992 3984 reg = ((modrm >> 3) & 7) | rex_r; 3993 3985 mod = (modrm >> 6) & 3; 3986 if (b1 >= 2) { 3987 goto illegal_op; 3988 } 3994 3989 3995 3990 sse_op2 = sse_op_table7[b].op[b1]; … … 5055 5050 5056 5051 modrm = ldub_code(s->pc++); 5057 mod = (modrm >> 6) & 3;5058 rm = (modrm & 7) | REX_B(s);5059 5052 reg = ((modrm >> 3) & 7) | rex_r; 5060 5053 … … 7304 7297 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7305 7298 } else { 7306 if (s->cc_op != CC_OP_DYNAMIC) { 7307 gen_op_set_cc_op(s->cc_op); 7308 s->cc_op = CC_OP_DYNAMIC; 7309 } 7299 gen_update_cc_op(s); 7310 7300 gen_jmp_im(pc_start - s->cs_base); 7311 7301 gen_helper_sysenter(); … … 7325 7315 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7326 7316 } else { 7327 if (s->cc_op != CC_OP_DYNAMIC) { 7328 gen_op_set_cc_op(s->cc_op); 7329 s->cc_op = CC_OP_DYNAMIC; 7330 } 7317 gen_update_cc_op(s); 7331 7318 gen_jmp_im(pc_start - s->cs_base); 7332 7319 
gen_helper_sysexit(tcg_const_i32(dflag)); … … 7337 7324 case 0x105: /* syscall */ 7338 7325 /* XXX: is it usable in real mode ? */ 7339 if (s->cc_op != CC_OP_DYNAMIC) { 7340 gen_op_set_cc_op(s->cc_op); 7341 s->cc_op = CC_OP_DYNAMIC; 7342 } 7326 gen_update_cc_op(s); 7343 7327 gen_jmp_im(pc_start - s->cs_base); 7344 7328 gen_helper_syscall(tcg_const_i32(s->pc - pc_start)); … … 7349 7333 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base); 7350 7334 } else { 7351 if (s->cc_op != CC_OP_DYNAMIC) { 7352 gen_op_set_cc_op(s->cc_op); 7353 s->cc_op = CC_OP_DYNAMIC; 7354 } 7335 gen_update_cc_op(s); 7355 7336 gen_jmp_im(pc_start - s->cs_base); 7356 7337 gen_helper_sysret(tcg_const_i32(s->dflag)); … … 7376 7357 gen_jmp_im(pc_start - s->cs_base); 7377 7358 gen_helper_hlt(tcg_const_i32(s->pc - pc_start)); 7378 s->is_jmp = 3;7359 s->is_jmp = DISAS_TB_JUMP; 7379 7360 } 7380 7361 break; … … 7452 7433 op = (modrm >> 3) & 7; 7453 7434 rm = modrm & 7; 7454 #ifdef VBOX7455 /* 0f 01 f9 */7456 if (modrm == 0xf9)7457 {7458 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))7459 goto illegal_op;7460 gen_jmp_im(pc_start - s->cs_base);7461 gen_helper_rdtscp();7462 break;7463 }7464 #endif /* VBOX */7465 7435 switch(op) { 7466 7436 case 0: /* sgdt */ … … 7504 7474 s->cpl != 0) 7505 7475 goto illegal_op; 7506 if (s->cc_op != CC_OP_DYNAMIC) { 7507 gen_op_set_cc_op(s->cc_op); 7508 s->cc_op = CC_OP_DYNAMIC; 7509 } 7476 gen_update_cc_op(s); 7510 7477 gen_jmp_im(pc_start - s->cs_base); 7511 7478 gen_helper_mwait(tcg_const_i32(s->pc - pc_start)); … … 7544 7511 tcg_const_i32(s->pc - pc_start)); 7545 7512 tcg_gen_exit_tb(0); 7546 s->is_jmp = 3;7513 s->is_jmp = DISAS_TB_JUMP; 7547 7514 } 7548 7515 break; … … 8036 8003 if (!(s->flags & HF_SMM_MASK)) 8037 8004 goto illegal_op; 8038 if (s->cc_op != CC_OP_DYNAMIC) { 8039 gen_op_set_cc_op(s->cc_op); 8040 s->cc_op = CC_OP_DYNAMIC; 8041 } 8005 gen_update_cc_op(s); 8042 8006 gen_jmp_im(s->pc - s->cs_base); 8043 8007 gen_helper_rsm(); … … 8180 8144 uint16_t *gen_opc_end; 8181 8145 CPUBreakpoint *bp; 8182 int j, lj , cflags;8146 int j, lj; 8183 8147 uint64_t flags; 8184 8148 target_ulong pc_start; … … 8194 8158 cs_base = tb->cs_base; 8195 8159 flags = tb->flags; 8196 cflags = tb->cflags;8197 8160 8198 8161 dc->pe = (flags >> HF_PE_SHIFT) & 1; … … 8364 8327 8365 8328 #ifdef DEBUG_DISAS 8366 log_cpu_state_mask(CPU_LOG_TB_CPU, env, X86_DUMP_CCOP);8367 8329 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) { 8368 8330 int disas_flags; -
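Note (illustration only): the translate.c hunks above repeatedly replace the open-coded "flush cc_op unless it is already dynamic" sequence with a single gen_update_cc_op(s) call. The helper itself is not shown in this changeset; the sketch below assumes it is equivalent to the three lines it replaces, with mocked-up types so the example compiles on its own.

#include <stdio.h>

enum { CC_OP_DYNAMIC = 0, CC_OP_SUBL = 1 };      /* tiny stand-in for the real enum */

typedef struct DisasContextMock { int cc_op; } DisasContextMock;

static void gen_op_set_cc_op(int cc_op)          /* stub for the real TCG emitter */
{
    printf("emit: movi cc_op, %d\n", cc_op);
}

/* Presumed equivalent of the gen_update_cc_op(s) calls introduced above. */
static void gen_update_cc_op(DisasContextMock *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

int main(void)
{
    DisasContextMock s = { CC_OP_SUBL };
    gen_update_cc_op(&s);    /* emits the store and marks cc_op dynamic */
    gen_update_cc_op(&s);    /* now a no-op */
    return 0;
}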
trunk/src/recompiler/targphys.h
r36175 r37689
6 6 #ifdef TARGET_PHYS_ADDR_BITS
7 7 /* target_phys_addr_t is the type of a physical address (its size can
8    be different from 'target_ulong'). We have sizeof(target_phys_addr)
9    = max(sizeof(unsigned long),
10   sizeof(size_of_target_physical_address)) because we must pass a
11   host pointer to memory operations in some cases */
8    be different from 'target_ulong'). */
12 9
13 10 #if TARGET_PHYS_ADDR_BITS == 32
-
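Illustration (not part of the changeset): right after the comment trimmed above, targphys.h picks the width of target_phys_addr_t from TARGET_PHYS_ADDR_BITS. A minimal sketch of that selection follows; the TARGET_FMT_plx definitions mirror what the real header is expected to provide, but everything is re-declared here so the example builds standalone.

#include <stdio.h>
#include <inttypes.h>

#define TARGET_PHYS_ADDR_BITS 64    /* assumed for this example */

#if TARGET_PHYS_ADDR_BITS == 32
typedef uint32_t target_phys_addr_t;
# define TARGET_FMT_plx "%08x"
#else
typedef uint64_t target_phys_addr_t;
# define TARGET_FMT_plx "%016" PRIx64
#endif

int main(void)
{
    target_phys_addr_t addr = 0xfee00000;   /* e.g. the default local APIC base */
    printf("paddr = " TARGET_FMT_plx "\n", addr);
    return 0;
}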
trunk/src/recompiler/tcg-runtime.c
r37675 r37689
29 29
30 30 #include "tcg/tcg-runtime.h"
31
32 /* 32-bit helpers */
33
34 int32_t tcg_helper_div_i32(int32_t arg1, int32_t arg2)
35 {
36     return arg1 / arg2;
37 }
38
39 int32_t tcg_helper_rem_i32(int32_t arg1, int32_t arg2)
40 {
41     return arg1 % arg2;
42 }
43
44 uint32_t tcg_helper_divu_i32(uint32_t arg1, uint32_t arg2)
45 {
46     return arg1 / arg2;
47 }
48
49 uint32_t tcg_helper_remu_i32(uint32_t arg1, uint32_t arg2)
50 {
51     return arg1 % arg2;
52 }
53
54 /* 64-bit helpers */
31 55
32 56 int64_t tcg_helper_shl_i64(int64_t arg1, int64_t arg2)
-
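Usage note (not part of the changeset): the new 32-bit helpers rely on C's truncating division, so the signed variants round toward zero. A quick standalone check of what they return for a negative dividend; the two functions are copied verbatim from the hunk above to keep the example self-contained.

#include <stdio.h>
#include <inttypes.h>

int32_t tcg_helper_div_i32(int32_t arg1, int32_t arg2) { return arg1 / arg2; }
int32_t tcg_helper_rem_i32(int32_t arg1, int32_t arg2) { return arg1 % arg2; }

int main(void)
{
    /* C99 division truncates toward zero: -7 / 2 == -3 and -7 % 2 == -1. */
    printf("div(-7, 2) = %" PRId32 "\n", tcg_helper_div_i32(-7, 2));
    printf("rem(-7, 2) = %" PRId32 "\n", tcg_helper_rem_i32(-7, 2));
    return 0;
}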
trunk/src/recompiler/tcg/README
r36175 r37689 5 5 TCG (Tiny Code Generator) began as a generic backend for a C 6 6 compiler. It was simplified to be used in QEMU. It also has its roots 7 in the QOP code generator written by Paul Brook. 7 in the QOP code generator written by Paul Brook. 8 8 9 9 2) Definitions … … 31 31 32 32 A TCG "basic block" corresponds to a list of instructions terminated 33 by a branch instruction. 33 by a branch instruction. 34 34 35 35 3) Intermediate representation … … 76 76 77 77 Using the tcg_gen_helper_x_y it is possible to call any function 78 taking i32, i64 or pointer types. Before calling an helper, all 79 globals are stored at their canonical location and it is assumed that 80 the function can modify them. In the future, function modifiers will 81 be allowed to tell that the helper does not read or write some globals. 78 taking i32, i64 or pointer types. By default, before calling an helper, 79 all globals are stored at their canonical location and it is assumed 80 that the function can modify them. This can be overriden by the 81 TCG_CALL_CONST function modifier. By default, the helper is allowed to 82 modify the CPU state or raise an exception. This can be overriden by 83 the TCG_CALL_PURE function modifier, in which case the call to the 84 function is removed if the return value is not used. 82 85 83 86 On some TCG targets (e.g. x86), several calling conventions are … … 97 100 98 101 and_i32 t0, t0, $0xffffffff 99 102 100 103 is suppressed. 101 104 … … 211 214 * eqv_i32/i64 t0, t1, t2 212 215 213 t0=~(t1^t2) 216 t0=~(t1^t2), or equivalently, t0=t1^~t2 214 217 215 218 * nand_i32/i64 t0, t1, t2 … … 266 269 * bswap16_i32/i64 t0, t1 267 270 268 16 bit byte swap on a 32/64 bit value. The two/six high order bytes must be269 set to zero.271 16 bit byte swap on a 32/64 bit value. It assumes that the two/six high order 272 bytes are set to zero. 270 273 271 274 * bswap32_i32/i64 t0, t1 272 275 273 32 bit byte swap on a 32/64 bit value. With a 64 bit value, the four high274 order bytes must be set to zero.276 32 bit byte swap on a 32/64 bit value. With a 64 bit value, it assumes that 277 the four high order bytes are set to zero. 275 278 276 279 * bswap64_i64 t0, t1 … … 282 285 Indicate that the value of t0 won't be used later. It is useful to 283 286 force dead code elimination. 287 288 ********* Conditional moves 289 290 * setcond_i32/i64 cond, dest, t1, t2 291 292 dest = (t1 cond t2) 293 294 Set DEST to 1 if (T1 cond T2) is true, otherwise set to 0. 284 295 285 296 ********* Type conversions … … 313 324 314 325 t0 = read(t1 + offset) 315 Load 8, 16, 32 or 64 bits with or without sign extension from host memory. 326 Load 8, 16, 32 or 64 bits with or without sign extension from host memory. 316 327 offset must be a constant. 317 328 … … 324 335 Write 8, 16, 32 or 64 bits to host memory. 325 336 337 ********* 64-bit target on 32-bit host support 338 339 The following opcodes are internal to TCG. Thus they are to be implemented by 340 32-bit host code generators, but are not to be emitted by guest translators. 341 They are emitted as needed by inline functions within "tcg-op.h". 342 343 * brcond2_i32 cond, t0_low, t0_high, t1_low, t1_high, label 344 345 Similar to brcond, except that the 64-bit values T0 and T1 346 are formed from two 32-bit arguments. 
347 348 * add2_i32 t0_low, t0_high, t1_low, t1_high, t2_low, t2_high 349 * sub2_i32 t0_low, t0_high, t1_low, t1_high, t2_low, t2_high 350 351 Similar to add/sub, except that the 64-bit inputs T1 and T2 are 352 formed from two 32-bit arguments, and the 64-bit output T0 353 is returned in two 32-bit outputs. 354 355 * mulu2_i32 t0_low, t0_high, t1, t2 356 357 Similar to mul, except two 32-bit (unsigned) inputs T1 and T2 yielding 358 the full 64-bit product T0. The later is returned in two 32-bit outputs. 359 360 * setcond2_i32 cond, dest, t1_low, t1_high, t2_low, t2_high 361 362 Similar to setcond, except that the 64-bit values T1 and T2 are 363 formed from two 32-bit arguments. The result is a 32-bit value. 364 326 365 ********* QEMU specific operations 327 366 … … 340 379 qemu_ld16u t0, t1, flags 341 380 qemu_ld16s t0, t1, flags 381 qemu_ld32 t0, t1, flags 342 382 qemu_ld32u t0, t1, flags 343 383 qemu_ld32s t0, t1, flags 344 384 qemu_ld64 t0, t1, flags 345 385 346 Load data at the QEMU CPU address t1 into t0. t1 has the QEMU CPU 347 address type. 'flags' contains the QEMU memory index (selects user or 348 kernel access) for example. 386 Load data at the QEMU CPU address t1 into t0. t1 has the QEMU CPU address 387 type. 'flags' contains the QEMU memory index (selects user or kernel access) 388 for example. 389 390 Note that "qemu_ld32" implies a 32-bit result, while "qemu_ld32u" and 391 "qemu_ld32s" imply a 64-bit result appropriately extended from 32 bits. 349 392 350 393 * qemu_st8 t0, t1, flags … … 419 462 - The first N parameters are passed in registers. 420 463 - The next parameters are passed on the stack by storing them as words. 421 - Some registers are clobbered during the call. 464 - Some registers are clobbered during the call. 422 465 - The function can return 0 or 1 value in registers. On a 32 bit 423 466 target, functions must be able to return 2 values in registers for -
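Illustration (not part of the changeset): the new add2_i32 paragraph above says that on a 32-bit host a 64-bit addition is expressed with each operand split into a low/high pair. Below is a plain-C sketch of that decomposition (no TCG involved); the function name is reused only to mirror the opcode being described.

#include <stdio.h>
#include <inttypes.h>

static void add2_i32(uint32_t *t0_low, uint32_t *t0_high,
                     uint32_t t1_low, uint32_t t1_high,
                     uint32_t t2_low, uint32_t t2_high)
{
    uint32_t low   = t1_low + t2_low;
    uint32_t carry = low < t1_low;           /* unsigned overflow of the low half */
    *t0_low  = low;
    *t0_high = t1_high + t2_high + carry;
}

int main(void)
{
    uint64_t a = 0x00000001ffffffffULL, b = 2;
    uint32_t lo, hi;

    add2_i32(&lo, &hi, (uint32_t)a, (uint32_t)(a >> 32),
                       (uint32_t)b, (uint32_t)(b >> 32));
    printf("result = 0x%08" PRIx32 "%08" PRIx32 " (expected 0x%016" PRIx64 ")\n",
           hi, lo, a + b);
    return 0;
}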
trunk/src/recompiler/tcg/TODO
r36175 r37689
1 - Add new instructions such as: setcond, clz, ctz, popcnt.
1 - Add new instructions such as: clz, ctz, popcnt.
2 2
3 - See if it is worth exporting mul2, mulu2, div2, divu2.
3 - See if it is worth exporting mul2, mulu2, div2, divu2.
4 4
5 5 - Support of globals saved in fixed registers between TBs.
-
trunk/src/recompiler/tcg/i386/tcg-target.c
r37675 r37689 25 25 #ifndef NDEBUG 26 26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { 27 "%eax", 28 "%ecx", 29 "%edx", 30 "%ebx", 31 "%esp", 32 "%ebp", 33 "%esi", 34 "%edi", 27 #if TCG_TARGET_REG_BITS == 64 28 "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi", 29 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", 30 #else 31 "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi", 32 #endif 35 33 }; 36 34 #endif 37 35 38 36 static const int tcg_target_reg_alloc_order[] = { 39 TCG_REG_EAX, 40 TCG_REG_EDX, 41 TCG_REG_ECX, 37 #if TCG_TARGET_REG_BITS == 64 38 TCG_REG_RBP, 39 TCG_REG_RBX, 40 TCG_REG_R12, 41 TCG_REG_R13, 42 TCG_REG_R14, 43 TCG_REG_R15, 44 TCG_REG_R10, 45 TCG_REG_R11, 46 TCG_REG_R9, 47 TCG_REG_R8, 48 TCG_REG_RCX, 49 TCG_REG_RDX, 50 TCG_REG_RSI, 51 TCG_REG_RDI, 52 TCG_REG_RAX, 53 #else 42 54 TCG_REG_EBX, 43 55 TCG_REG_ESI, 44 56 TCG_REG_EDI, 45 57 TCG_REG_EBP, 58 TCG_REG_ECX, 59 TCG_REG_EDX, 60 TCG_REG_EAX, 61 #endif 46 62 }; 47 63 48 static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX }; 49 static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX }; 64 static const int tcg_target_call_iarg_regs[] = { 65 #if TCG_TARGET_REG_BITS == 64 66 TCG_REG_RDI, 67 TCG_REG_RSI, 68 TCG_REG_RDX, 69 TCG_REG_RCX, 70 TCG_REG_R8, 71 TCG_REG_R9, 72 #else 73 TCG_REG_EAX, 74 TCG_REG_EDX, 75 TCG_REG_ECX 76 #endif 77 }; 78 79 static const int tcg_target_call_oarg_regs[2] = { 80 TCG_REG_EAX, 81 TCG_REG_EDX 82 }; 50 83 51 84 static uint8_t *tb_ret_addr; … … 56 89 value += addend; 57 90 switch(type) { 58 case R_386_32: 91 case R_386_PC32: 92 value -= (uintptr_t)code_ptr; 93 if (value != (int32_t)value) { 94 tcg_abort(); 95 } 59 96 *(uint32_t *)code_ptr = value; 60 97 break; 61 case R_386_PC32: 62 *(uint32_t *)code_ptr = value - (long)code_ptr; 98 case R_386_PC8: 99 value -= (uintptr_t)code_ptr; 100 if (value != (int8_t)value) { 101 tcg_abort(); 102 } 103 *(uint8_t *)code_ptr = value; 63 104 break; 64 105 default: … … 86 127 static inline int tcg_target_get_call_iarg_regs_count(int flags) 87 128 { 129 if (TCG_TARGET_REG_BITS == 64) { 130 return 6; 131 } 132 88 133 flags &= TCG_CALL_TYPE_MASK; 89 134 switch(flags) { … … 132 177 case 'q': 133 178 ct->ct |= TCG_CT_REG; 134 tcg_regset_set32(ct->u.regs, 0, 0xf); 179 if (TCG_TARGET_REG_BITS == 64) { 180 tcg_regset_set32(ct->u.regs, 0, 0xffff); 181 } else { 182 tcg_regset_set32(ct->u.regs, 0, 0xf); 183 } 135 184 break; 136 185 case 'r': 137 186 ct->ct |= TCG_CT_REG; 138 tcg_regset_set32(ct->u.regs, 0, 0xff); 187 if (TCG_TARGET_REG_BITS == 64) { 188 tcg_regset_set32(ct->u.regs, 0, 0xffff); 189 } else { 190 tcg_regset_set32(ct->u.regs, 0, 0xff); 191 } 139 192 break; 140 193 … … 142 195 case 'L': 143 196 ct->ct |= TCG_CT_REG; 144 tcg_regset_set32(ct->u.regs, 0, 0xff); 145 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX); 146 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX); 147 break; 197 if (TCG_TARGET_REG_BITS == 64) { 198 tcg_regset_set32(ct->u.regs, 0, 0xffff); 199 tcg_regset_reset_reg(ct->u.regs, TCG_REG_RSI); 200 tcg_regset_reset_reg(ct->u.regs, TCG_REG_RDI); 201 } else { 202 tcg_regset_set32(ct->u.regs, 0, 0xff); 203 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX); 204 tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX); 205 } 206 break; 207 208 case 'e': 209 ct->ct |= TCG_CT_CONST_S32; 210 break; 211 case 'Z': 212 ct->ct |= TCG_CT_CONST_U32; 213 break; 214 148 215 default: 149 216 return -1; … … 158 225 const TCGArgConstraint *arg_ct) 159 226 { 160 int ct; 161 ct = 
arg_ct->ct; 162 if (ct & TCG_CT_CONST) 227 int ct = arg_ct->ct; 228 if (ct & TCG_CT_CONST) { 163 229 return 1; 164 else 165 return 0; 166 } 167 230 } 231 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) { 232 return 1; 233 } 234 if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) { 235 return 1; 236 } 237 return 0; 238 } 239 240 #if TCG_TARGET_REG_BITS == 64 241 # define LOWREGMASK(x) ((x) & 7) 242 #else 243 # define LOWREGMASK(x) (x) 244 #endif 245 246 #define P_EXT 0x100 /* 0x0f opcode prefix */ 247 #define P_DATA16 0x200 /* 0x66 opcode prefix */ 248 #if TCG_TARGET_REG_BITS == 64 249 # define P_ADDR32 0x400 /* 0x67 opcode prefix */ 250 # define P_REXW 0x800 /* Set REX.W = 1 */ 251 # define P_REXB_R 0x1000 /* REG field as byte register */ 252 # define P_REXB_RM 0x2000 /* R/M field as byte register */ 253 #else 254 # define P_ADDR32 0 255 # define P_REXW 0 256 # define P_REXB_R 0 257 # define P_REXB_RM 0 258 #endif 259 260 #define OPC_ARITH_EvIz (0x81) 261 #define OPC_ARITH_EvIb (0x83) 262 #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */ 263 #define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3)) 264 #define OPC_BSWAP (0xc8 | P_EXT) 265 #define OPC_CALL_Jz (0xe8) 266 #define OPC_CMP_GvEv (OPC_ARITH_GvEv | (ARITH_CMP << 3)) 267 #define OPC_DEC_r32 (0x48) 268 #define OPC_IMUL_GvEv (0xaf | P_EXT) 269 #define OPC_IMUL_GvEvIb (0x6b) 270 #define OPC_IMUL_GvEvIz (0x69) 271 #define OPC_INC_r32 (0x40) 272 #define OPC_JCC_long (0x80 | P_EXT) /* ... plus condition code */ 273 #define OPC_JCC_short (0x70) /* ... plus condition code */ 274 #define OPC_JMP_long (0xe9) 275 #define OPC_JMP_short (0xeb) 276 #define OPC_LEA (0x8d) 277 #define OPC_MOVB_EvGv (0x88) /* stores, more or less */ 278 #define OPC_MOVL_EvGv (0x89) /* stores, more or less */ 279 #define OPC_MOVL_GvEv (0x8b) /* loads, more or less */ 280 #define OPC_MOVL_EvIz (0xc7) 281 #define OPC_MOVL_Iv (0xb8) 282 #define OPC_MOVSBL (0xbe | P_EXT) 283 #define OPC_MOVSWL (0xbf | P_EXT) 284 #define OPC_MOVSLQ (0x63 | P_REXW) 285 #define OPC_MOVZBL (0xb6 | P_EXT) 286 #define OPC_MOVZWL (0xb7 | P_EXT) 287 #define OPC_POP_r32 (0x58) 288 #define OPC_PUSH_r32 (0x50) 289 #define OPC_PUSH_Iv (0x68) 290 #define OPC_PUSH_Ib (0x6a) 291 #define OPC_RET (0xc3) 292 #define OPC_SETCC (0x90 | P_EXT | P_REXB_RM) /* ... plus cc */ 293 #define OPC_SHIFT_1 (0xd1) 294 #define OPC_SHIFT_Ib (0xc1) 295 #define OPC_SHIFT_cl (0xd3) 296 #define OPC_TESTL (0x85) 297 #define OPC_XCHG_ax_r32 (0x90) 298 299 #define OPC_GRP3_Ev (0xf7) 300 #define OPC_GRP5 (0xff) 301 302 /* Group 1 opcode extensions for 0x80-0x83. 303 These are also used as modifiers for OPC_ARITH. */ 168 304 #define ARITH_ADD 0 169 305 #define ARITH_OR 1 … … 175 311 #define ARITH_CMP 7 176 312 313 /* Group 2 opcode extensions for 0xc0, 0xc1, 0xd0-0xd3. */ 177 314 #define SHIFT_ROL 0 178 315 #define SHIFT_ROR 1 … … 181 318 #define SHIFT_SAR 7 182 319 320 /* Group 3 opcode extensions for 0xf6, 0xf7. To be used with OPC_GRP3. */ 321 #define EXT3_NOT 2 322 #define EXT3_NEG 3 323 #define EXT3_MUL 4 324 #define EXT3_IMUL 5 325 #define EXT3_DIV 6 326 #define EXT3_IDIV 7 327 328 /* Group 5 opcode extensions for 0xff. To be used with OPC_GRP5. */ 329 #define EXT5_INC_Ev 0 330 #define EXT5_DEC_Ev 1 331 #define EXT5_CALLN_Ev 2 332 #define EXT5_JMPN_Ev 4 333 334 /* Condition codes to be added to OPC_JCC_{long,short}. 
*/ 183 335 #define JCC_JMP (-1) 184 336 #define JCC_JO 0x0 … … 199 351 #define JCC_JG 0xf 200 352 201 #define P_EXT 0x100 /* 0x0f opcode prefix */202 203 353 static const uint8_t tcg_cond_to_jcc[10] = { 204 354 [TCG_COND_EQ] = JCC_JE, … … 214 364 }; 215 365 216 static inline void tcg_out_opc(TCGContext *s, int opc) 217 { 218 if (opc & P_EXT) 366 #if TCG_TARGET_REG_BITS == 64 367 static void tcg_out_opc(TCGContext *s, int opc, int r, int rm, int x) 368 { 369 int rex; 370 371 if (opc & P_DATA16) { 372 /* We should never be asking for both 16 and 64-bit operation. */ 373 assert((opc & P_REXW) == 0); 374 tcg_out8(s, 0x66); 375 } 376 if (opc & P_ADDR32) { 377 tcg_out8(s, 0x67); 378 } 379 380 rex = 0; 381 rex |= (opc & P_REXW) >> 8; /* REX.W */ 382 rex |= (r & 8) >> 1; /* REX.R */ 383 rex |= (x & 8) >> 2; /* REX.X */ 384 rex |= (rm & 8) >> 3; /* REX.B */ 385 386 /* P_REXB_{R,RM} indicates that the given register is the low byte. 387 For %[abcd]l we need no REX prefix, but for %{si,di,bp,sp}l we do, 388 as otherwise the encoding indicates %[abcd]h. Note that the values 389 that are ORed in merely indicate that the REX byte must be present; 390 those bits get discarded in output. */ 391 rex |= opc & (r >= 4 ? P_REXB_R : 0); 392 rex |= opc & (rm >= 4 ? P_REXB_RM : 0); 393 394 if (rex) { 395 tcg_out8(s, (uint8_t)(rex | 0x40)); 396 } 397 398 if (opc & P_EXT) { 219 399 tcg_out8(s, 0x0f); 400 } 220 401 tcg_out8(s, opc); 221 402 } 222 223 static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm) 224 { 225 tcg_out_opc(s, opc); 226 tcg_out8(s, 0xc0 | (r << 3) | rm); 227 } 228 229 /* rm == -1 means no register index */ 230 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm, 231 int32_t offset) 232 { 233 tcg_out_opc(s, opc); 234 if (rm == -1) { 235 tcg_out8(s, 0x05 | (r << 3)); 403 #else 404 static void tcg_out_opc(TCGContext *s, int opc) 405 { 406 if (opc & P_DATA16) { 407 tcg_out8(s, 0x66); 408 } 409 if (opc & P_EXT) { 410 tcg_out8(s, 0x0f); 411 } 412 tcg_out8(s, opc); 413 } 414 /* Discard the register arguments to tcg_out_opc early, so as not to penalize 415 the 32-bit compilation paths. This method works with all versions of gcc, 416 whereas relying on optimization may not be able to exclude them. */ 417 #define tcg_out_opc(s, opc, r, rm, x) (tcg_out_opc)(s, opc) 418 #endif 419 420 static void tcg_out_modrm(TCGContext *s, int opc, int r, int rm) 421 { 422 tcg_out_opc(s, opc, r, rm, 0); 423 tcg_out8(s, 0xc0 | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); 424 } 425 426 /* Output an opcode with a full "rm + (index<<shift) + offset" address mode. 427 We handle either RM and INDEX missing with a negative value. In 64-bit 428 mode for absolute addresses, ~RM is the size of the immediate operand 429 that will follow the instruction. */ 430 431 static void tcg_out_modrm_sib_offset(TCGContext *s, int opc, int r, int rm, 432 int index, int shift, 433 tcg_target_long offset) 434 { 435 int mod, len; 436 437 if (index < 0 && rm < 0) { 438 if (TCG_TARGET_REG_BITS == 64) { 439 /* Try for a rip-relative addressing mode. This has replaced 440 the 32-bit-mode absolute addressing encoding. */ 441 tcg_target_long pc = (tcg_target_long)s->code_ptr + 5 + ~rm; 442 tcg_target_long disp = offset - pc; 443 if (disp == (int32_t)disp) { 444 tcg_out_opc(s, opc, r, 0, 0); 445 tcg_out8(s, (LOWREGMASK(r) << 3) | 5); 446 tcg_out32(s, disp); 447 return; 448 } 449 450 /* Try for an absolute address encoding. 
This requires the 451 use of the MODRM+SIB encoding and is therefore larger than 452 rip-relative addressing. */ 453 if (offset == (int32_t)offset) { 454 tcg_out_opc(s, opc, r, 0, 0); 455 tcg_out8(s, (LOWREGMASK(r) << 3) | 4); 456 tcg_out8(s, (4 << 3) | 5); 457 tcg_out32(s, offset); 458 return; 459 } 460 461 /* ??? The memory isn't directly addressable. */ 462 tcg_abort(); 463 } else { 464 /* Absolute address. */ 465 tcg_out_opc(s, opc, r, 0, 0); 466 tcg_out8(s, (r << 3) | 5); 467 tcg_out32(s, offset); 468 return; 469 } 470 } 471 472 /* Find the length of the immediate addend. Note that the encoding 473 that would be used for (%ebp) indicates absolute addressing. */ 474 if (rm < 0) { 475 mod = 0, len = 4, rm = 5; 476 } else if (offset == 0 && LOWREGMASK(rm) != TCG_REG_EBP) { 477 mod = 0, len = 0; 478 } else if (offset == (int8_t)offset) { 479 mod = 0x40, len = 1; 480 } else { 481 mod = 0x80, len = 4; 482 } 483 484 /* Use a single byte MODRM format if possible. Note that the encoding 485 that would be used for %esp is the escape to the two byte form. */ 486 if (index < 0 && LOWREGMASK(rm) != TCG_REG_ESP) { 487 /* Single byte MODRM format. */ 488 tcg_out_opc(s, opc, r, rm, 0); 489 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | LOWREGMASK(rm)); 490 } else { 491 /* Two byte MODRM+SIB format. */ 492 493 /* Note that the encoding that would place %esp into the index 494 field indicates no index register. In 64-bit mode, the REX.X 495 bit counts, so %r12 can be used as the index. */ 496 if (index < 0) { 497 index = 4; 498 } else { 499 assert(index != TCG_REG_ESP); 500 } 501 502 tcg_out_opc(s, opc, r, rm, index); 503 tcg_out8(s, mod | (LOWREGMASK(r) << 3) | 4); 504 tcg_out8(s, (shift << 6) | (LOWREGMASK(index) << 3) | LOWREGMASK(rm)); 505 } 506 507 if (len == 1) { 508 tcg_out8(s, offset); 509 } else if (len == 4) { 236 510 tcg_out32(s, offset); 237 } else if (offset == 0 && rm != TCG_REG_EBP) { 238 if (rm == TCG_REG_ESP) { 239 tcg_out8(s, 0x04 | (r << 3)); 240 tcg_out8(s, 0x24); 241 } else { 242 tcg_out8(s, 0x00 | (r << 3) | rm); 243 } 244 } else if ((int8_t)offset == offset) { 245 if (rm == TCG_REG_ESP) { 246 tcg_out8(s, 0x44 | (r << 3)); 247 tcg_out8(s, 0x24); 248 } else { 249 tcg_out8(s, 0x40 | (r << 3) | rm); 250 } 251 tcg_out8(s, offset); 511 } 512 } 513 514 /* A simplification of the above with no index or shift. */ 515 static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, 516 int rm, tcg_target_long offset) 517 { 518 tcg_out_modrm_sib_offset(s, opc, r, rm, -1, 0, offset); 519 } 520 521 /* Generate dest op= src. Uses the same ARITH_* codes as tgen_arithi. */ 522 static inline void tgen_arithr(TCGContext *s, int subop, int dest, int src) 523 { 524 /* Propagate an opcode prefix, such as P_REXW. */ 525 int ext = subop & ~0x7; 526 subop &= 0x7; 527 528 tcg_out_modrm(s, OPC_ARITH_GvEv + (subop << 3) + ext, dest, src); 529 } 530 531 static inline void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg) 532 { 533 if (arg != ret) { 534 int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? 
P_REXW : 0); 535 tcg_out_modrm(s, opc, ret, arg); 536 } 537 } 538 539 static void tcg_out_movi(TCGContext *s, TCGType type, 540 int ret, tcg_target_long arg) 541 { 542 if (arg == 0) { 543 tgen_arithr(s, ARITH_XOR, ret, ret); 544 return; 545 } else if (arg == (uint32_t)arg || type == TCG_TYPE_I32) { 546 tcg_out_opc(s, OPC_MOVL_Iv + LOWREGMASK(ret), 0, ret, 0); 547 tcg_out32(s, arg); 548 } else if (arg == (int32_t)arg) { 549 tcg_out_modrm(s, OPC_MOVL_EvIz + P_REXW, 0, ret); 550 tcg_out32(s, arg); 252 551 } else { 253 if (rm == TCG_REG_ESP) { 254 tcg_out8(s, 0x84 | (r << 3)); 255 tcg_out8(s, 0x24); 256 } else { 257 tcg_out8(s, 0x80 | (r << 3) | rm); 258 } 259 tcg_out32(s, offset); 260 } 261 } 262 263 static inline void tcg_out_mov(TCGContext *s, int ret, int arg) 264 { 265 if (arg != ret) 266 tcg_out_modrm(s, 0x8b, ret, arg); 267 } 268 269 static inline void tcg_out_movi(TCGContext *s, TCGType type, 270 int ret, int32_t arg) 271 { 272 if (arg == 0) { 273 /* xor r0,r0 */ 274 tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret); 552 tcg_out_opc(s, OPC_MOVL_Iv + P_REXW + LOWREGMASK(ret), 0, ret, 0); 553 tcg_out32(s, arg); 554 tcg_out32(s, arg >> 31 >> 1); 555 } 556 } 557 558 static inline void tcg_out_pushi(TCGContext *s, tcg_target_long val) 559 { 560 if (val == (int8_t)val) { 561 tcg_out_opc(s, OPC_PUSH_Ib, 0, 0, 0); 562 tcg_out8(s, val); 563 } else if (val == (int32_t)val) { 564 tcg_out_opc(s, OPC_PUSH_Iv, 0, 0, 0); 565 tcg_out32(s, val); 275 566 } else { 276 tcg_out8(s, 0xb8 + ret); 277 tcg_out32(s, arg); 278 } 567 tcg_abort(); 568 } 569 } 570 571 static inline void tcg_out_push(TCGContext *s, int reg) 572 { 573 tcg_out_opc(s, OPC_PUSH_r32 + LOWREGMASK(reg), 0, reg, 0); 574 } 575 576 static inline void tcg_out_pop(TCGContext *s, int reg) 577 { 578 tcg_out_opc(s, OPC_POP_r32 + LOWREGMASK(reg), 0, reg, 0); 279 579 } 280 580 … … 282 582 int arg1, tcg_target_long arg2) 283 583 { 284 /* movl */285 tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2);584 int opc = OPC_MOVL_GvEv + (type == TCG_TYPE_I64 ? P_REXW : 0); 585 tcg_out_modrm_offset(s, opc, ret, arg1, arg2); 286 586 } 287 587 … … 289 589 int arg1, tcg_target_long arg2) 290 590 { 291 /* movl */ 292 tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2); 293 } 294 295 static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val, int cf) 296 { 297 if (!cf && ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1))) { 298 /* inc */ 299 tcg_out_opc(s, 0x40 + r0); 300 } else if (!cf && ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1))) { 301 /* dec */ 302 tcg_out_opc(s, 0x48 + r0); 303 } else if (val == (int8_t)val) { 304 tcg_out_modrm(s, 0x83, c, r0); 591 int opc = OPC_MOVL_EvGv + (type == TCG_TYPE_I64 ? P_REXW : 0); 592 tcg_out_modrm_offset(s, opc, arg, arg1, arg2); 593 } 594 595 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count) 596 { 597 /* Propagate an opcode prefix, such as P_DATA16. 
*/ 598 int ext = subopc & ~0x7; 599 subopc &= 0x7; 600 601 if (count == 1) { 602 tcg_out_modrm(s, OPC_SHIFT_1 + ext, subopc, reg); 603 } else { 604 tcg_out_modrm(s, OPC_SHIFT_Ib + ext, subopc, reg); 605 tcg_out8(s, count); 606 } 607 } 608 609 static inline void tcg_out_bswap32(TCGContext *s, int reg) 610 { 611 tcg_out_opc(s, OPC_BSWAP + LOWREGMASK(reg), 0, reg, 0); 612 } 613 614 static inline void tcg_out_rolw_8(TCGContext *s, int reg) 615 { 616 tcg_out_shifti(s, SHIFT_ROL + P_DATA16, reg, 8); 617 } 618 619 static inline void tcg_out_ext8u(TCGContext *s, int dest, int src) 620 { 621 /* movzbl */ 622 assert(src < 4 || TCG_TARGET_REG_BITS == 64); 623 tcg_out_modrm(s, OPC_MOVZBL + P_REXB_RM, dest, src); 624 } 625 626 static void tcg_out_ext8s(TCGContext *s, int dest, int src, int rexw) 627 { 628 /* movsbl */ 629 assert(src < 4 || TCG_TARGET_REG_BITS == 64); 630 tcg_out_modrm(s, OPC_MOVSBL + P_REXB_RM + rexw, dest, src); 631 } 632 633 static inline void tcg_out_ext16u(TCGContext *s, int dest, int src) 634 { 635 /* movzwl */ 636 tcg_out_modrm(s, OPC_MOVZWL, dest, src); 637 } 638 639 static inline void tcg_out_ext16s(TCGContext *s, int dest, int src, int rexw) 640 { 641 /* movsw[lq] */ 642 tcg_out_modrm(s, OPC_MOVSWL + rexw, dest, src); 643 } 644 645 static inline void tcg_out_ext32u(TCGContext *s, int dest, int src) 646 { 647 /* 32-bit mov zero extends. */ 648 tcg_out_modrm(s, OPC_MOVL_GvEv, dest, src); 649 } 650 651 static inline void tcg_out_ext32s(TCGContext *s, int dest, int src) 652 { 653 tcg_out_modrm(s, OPC_MOVSLQ, dest, src); 654 } 655 656 static inline void tcg_out_bswap64(TCGContext *s, int reg) 657 { 658 tcg_out_opc(s, OPC_BSWAP + P_REXW + LOWREGMASK(reg), 0, reg, 0); 659 } 660 661 static void tgen_arithi(TCGContext *s, int c, int r0, 662 tcg_target_long val, int cf) 663 { 664 int rexw = 0; 665 666 if (TCG_TARGET_REG_BITS == 64) { 667 rexw = c & -8; 668 c &= 7; 669 } 670 671 /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce 672 partial flags update stalls on Pentium4 and are not recommended 673 by current Intel optimization manuals. */ 674 if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) { 675 int is_inc = (c == ARITH_ADD) ^ (val < 0); 676 if (TCG_TARGET_REG_BITS == 64) { 677 /* The single-byte increment encodings are re-tasked as the 678 REX prefixes. Use the MODRM encoding. */ 679 tcg_out_modrm(s, OPC_GRP5 + rexw, 680 (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); 681 } else { 682 tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); 683 } 684 return; 685 } 686 687 if (c == ARITH_AND) { 688 if (TCG_TARGET_REG_BITS == 64) { 689 if (val == 0xffffffffu) { 690 tcg_out_ext32u(s, r0, r0); 691 return; 692 } 693 if (val == (uint32_t)val) { 694 /* AND with no high bits set can use a 32-bit operation. 
*/ 695 rexw = 0; 696 } 697 } 698 if (val == 0xffu && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) { 699 tcg_out_ext8u(s, r0, r0); 700 return; 701 } 702 if (val == 0xffffu) { 703 tcg_out_ext16u(s, r0, r0); 704 return; 705 } 706 } 707 708 if (val == (int8_t)val) { 709 tcg_out_modrm(s, OPC_ARITH_EvIb + rexw, c, r0); 305 710 tcg_out8(s, val); 306 } else if (c == ARITH_AND && val == 0xffu && r0 < 4) { 307 /* movzbl */ 308 tcg_out_modrm(s, 0xb6 | P_EXT, r0, r0); 309 } else if (c == ARITH_AND && val == 0xffffu) { 310 /* movzwl */ 311 tcg_out_modrm(s, 0xb7 | P_EXT, r0, r0); 312 } else { 313 tcg_out_modrm(s, 0x81, c, r0); 711 return; 712 } 713 if (rexw == 0 || val == (int32_t)val) { 714 tcg_out_modrm(s, OPC_ARITH_EvIz + rexw, c, r0); 314 715 tcg_out32(s, val); 315 } 716 return; 717 } 718 719 tcg_abort(); 316 720 } 317 721 318 722 static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val) 319 723 { 320 if (val != 0) 321 tgen_arithi(s, ARITH_ADD, reg, val, 0); 724 if (val != 0) { 725 tgen_arithi(s, ARITH_ADD + P_REXW, reg, val, 0); 726 } 322 727 } 323 728 … … 325 730 static void tcg_out_subi(TCGContext *s, int reg, tcg_target_long val) 326 731 { 327 if (val != 0) 328 tgen_arithi(s, ARITH_SUB, reg, val, 0); 329 } 330 #endif 331 332 static void tcg_out_jxx(TCGContext *s, int opc, int label_index) 732 if (val != 0) { 733 tgen_arithi(s, ARITH_SUB + P_REXW, reg, val, 0); 734 } 735 } 736 #endif 737 738 /* Use SMALL != 0 to force a short forward branch. */ 739 static void tcg_out_jxx(TCGContext *s, int opc, int label_index, int small) 333 740 { 334 741 int32_t val, val1; … … 339 746 val1 = val - 2; 340 747 if ((int8_t)val1 == val1) { 341 if (opc == -1) 342 tcg_out8(s, 0xeb); 343 else 344 tcg_out8(s, 0x70 + opc); 748 if (opc == -1) { 749 tcg_out8(s, OPC_JMP_short); 750 } else { 751 tcg_out8(s, OPC_JCC_short + opc); 752 } 345 753 tcg_out8(s, val1); 346 754 } else { 755 if (small) { 756 tcg_abort(); 757 } 347 758 if (opc == -1) { 348 tcg_out8(s, 0xe9);759 tcg_out8(s, OPC_JMP_long); 349 760 tcg_out32(s, val - 5); 350 761 } else { 351 tcg_out8(s, 0x0f); 352 tcg_out8(s, 0x80 + opc); 762 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); 353 763 tcg_out32(s, val - 6); 354 764 } 355 765 } 766 } else if (small) { 767 if (opc == -1) { 768 tcg_out8(s, OPC_JMP_short); 769 } else { 770 tcg_out8(s, OPC_JCC_short + opc); 771 } 772 tcg_out_reloc(s, s->code_ptr, R_386_PC8, label_index, -1); 773 s->code_ptr += 1; 356 774 } else { 357 775 if (opc == -1) { 358 tcg_out8(s, 0xe9); 359 } else { 360 tcg_out8(s, 0x0f); 361 tcg_out8(s, 0x80 + opc); 776 tcg_out8(s, OPC_JMP_long); 777 } else { 778 tcg_out_opc(s, OPC_JCC_long + opc, 0, 0, 0); 362 779 } 363 780 tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4); … … 366 783 } 367 784 368 static void tcg_out_brcond(TCGContext *s, int cond, 369 TCGArg arg1, TCGArg arg2, int const_arg2, 370 int label_index) 785 static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2, 786 int const_arg2, int rexw) 371 787 { 372 788 if (const_arg2) { 373 789 if (arg2 == 0) { 374 790 /* test r, r */ 375 tcg_out_modrm(s, 0x85, arg1, arg1);376 } else { 377 tgen_arithi(s, ARITH_CMP , arg1, arg2, 0);791 tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1); 792 } else { 793 tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0); 378 794 } 379 795 } else { 380 tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1); 381 } 382 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index); 383 } 384 796 tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2); 797 } 798 } 799 800 static void tcg_out_brcond32(TCGContext *s, TCGCond cond, 
801 TCGArg arg1, TCGArg arg2, int const_arg2, 802 int label_index, int small) 803 { 804 tcg_out_cmp(s, arg1, arg2, const_arg2, 0); 805 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small); 806 } 807 808 #if TCG_TARGET_REG_BITS == 64 809 static void tcg_out_brcond64(TCGContext *s, TCGCond cond, 810 TCGArg arg1, TCGArg arg2, int const_arg2, 811 int label_index, int small) 812 { 813 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); 814 tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index, small); 815 } 816 #else 385 817 /* XXX: we implement it at the target level to avoid having to 386 818 handle cross basic blocks temporaries */ 387 static void tcg_out_brcond2(TCGContext *s, 388 const TCGArg *args, const int *const_args)819 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args, 820 const int *const_args, int small) 389 821 { 390 822 int label_next; … … 392 824 switch(args[4]) { 393 825 case TCG_COND_EQ: 394 tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next); 395 tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3], args[5]); 826 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], 827 label_next, 1); 828 tcg_out_brcond32(s, TCG_COND_EQ, args[1], args[3], const_args[3], 829 args[5], small); 396 830 break; 397 831 case TCG_COND_NE: 398 tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], args[5]); 399 tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], args[5]); 832 tcg_out_brcond32(s, TCG_COND_NE, args[0], args[2], const_args[2], 833 args[5], small); 834 tcg_out_brcond32(s, TCG_COND_NE, args[1], args[3], const_args[3], 835 args[5], small); 400 836 break; 401 837 case TCG_COND_LT: 402 tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]); 403 tcg_out_jxx(s, JCC_JNE, label_next); 404 tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]); 838 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], 839 args[5], small); 840 tcg_out_jxx(s, JCC_JNE, label_next, 1); 841 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], 842 args[5], small); 405 843 break; 406 844 case TCG_COND_LE: 407 tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]); 408 tcg_out_jxx(s, JCC_JNE, label_next); 409 tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]); 845 tcg_out_brcond32(s, TCG_COND_LT, args[1], args[3], const_args[3], 846 args[5], small); 847 tcg_out_jxx(s, JCC_JNE, label_next, 1); 848 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], 849 args[5], small); 410 850 break; 411 851 case TCG_COND_GT: 412 tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]); 413 tcg_out_jxx(s, JCC_JNE, label_next); 414 tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]); 852 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], 853 args[5], small); 854 tcg_out_jxx(s, JCC_JNE, label_next, 1); 855 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], 856 args[5], small); 415 857 break; 416 858 case TCG_COND_GE: 417 tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]); 418 tcg_out_jxx(s, JCC_JNE, label_next); 419 tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]); 859 tcg_out_brcond32(s, TCG_COND_GT, args[1], args[3], const_args[3], 860 args[5], small); 861 tcg_out_jxx(s, JCC_JNE, label_next, 1); 862 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2], 863 args[5], small); 420 864 break; 421 865 case TCG_COND_LTU: 422 
tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]); 423 tcg_out_jxx(s, JCC_JNE, label_next); 424 tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]); 866 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], 867 args[5], small); 868 tcg_out_jxx(s, JCC_JNE, label_next, 1); 869 tcg_out_brcond32(s, TCG_COND_LTU, args[0], args[2], const_args[2], 870 args[5], small); 425 871 break; 426 872 case TCG_COND_LEU: 427 tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]); 428 tcg_out_jxx(s, JCC_JNE, label_next); 429 tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]); 873 tcg_out_brcond32(s, TCG_COND_LTU, args[1], args[3], const_args[3], 874 args[5], small); 875 tcg_out_jxx(s, JCC_JNE, label_next, 1); 876 tcg_out_brcond32(s, TCG_COND_LEU, args[0], args[2], const_args[2], 877 args[5], small); 430 878 break; 431 879 case TCG_COND_GTU: 432 tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]); 433 tcg_out_jxx(s, JCC_JNE, label_next); 434 tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]); 880 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], 881 args[5], small); 882 tcg_out_jxx(s, JCC_JNE, label_next, 1); 883 tcg_out_brcond32(s, TCG_COND_GTU, args[0], args[2], const_args[2], 884 args[5], small); 435 885 break; 436 886 case TCG_COND_GEU: 437 tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]); 438 tcg_out_jxx(s, JCC_JNE, label_next); 439 tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]); 887 tcg_out_brcond32(s, TCG_COND_GTU, args[1], args[3], const_args[3], 888 args[5], small); 889 tcg_out_jxx(s, JCC_JNE, label_next, 1); 890 tcg_out_brcond32(s, TCG_COND_GEU, args[0], args[2], const_args[2], 891 args[5], small); 440 892 break; 441 893 default: … … 443 895 } 444 896 tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr); 897 } 898 #endif 899 900 static void tcg_out_setcond32(TCGContext *s, TCGCond cond, TCGArg dest, 901 TCGArg arg1, TCGArg arg2, int const_arg2) 902 { 903 tcg_out_cmp(s, arg1, arg2, const_arg2, 0); 904 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); 905 tcg_out_ext8u(s, dest, dest); 906 } 907 908 #if TCG_TARGET_REG_BITS == 64 909 static void tcg_out_setcond64(TCGContext *s, TCGCond cond, TCGArg dest, 910 TCGArg arg1, TCGArg arg2, int const_arg2) 911 { 912 tcg_out_cmp(s, arg1, arg2, const_arg2, P_REXW); 913 tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest); 914 tcg_out_ext8u(s, dest, dest); 915 } 916 #else 917 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args, 918 const int *const_args) 919 { 920 TCGArg new_args[6]; 921 int label_true, label_over; 922 923 memcpy(new_args, args+1, 5*sizeof(TCGArg)); 924 925 if (args[0] == args[1] || args[0] == args[2] 926 || (!const_args[3] && args[0] == args[3]) 927 || (!const_args[4] && args[0] == args[4])) { 928 /* When the destination overlaps with one of the argument 929 registers, don't do anything tricky. 
*/ 930 label_true = gen_new_label(); 931 label_over = gen_new_label(); 932 933 new_args[5] = label_true; 934 tcg_out_brcond2(s, new_args, const_args+1, 1); 935 936 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); 937 tcg_out_jxx(s, JCC_JMP, label_over, 1); 938 tcg_out_label(s, label_true, (tcg_target_long)s->code_ptr); 939 940 tcg_out_movi(s, TCG_TYPE_I32, args[0], 1); 941 tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr); 942 } else { 943 /* When the destination does not overlap one of the arguments, 944 clear the destination first, jump if cond false, and emit an 945 increment in the true case. This results in smaller code. */ 946 947 tcg_out_movi(s, TCG_TYPE_I32, args[0], 0); 948 949 label_over = gen_new_label(); 950 new_args[4] = tcg_invert_cond(new_args[4]); 951 new_args[5] = label_over; 952 tcg_out_brcond2(s, new_args, const_args+1, 1); 953 954 tgen_arithi(s, ARITH_ADD, args[0], 1, 0); 955 tcg_out_label(s, label_over, (tcg_target_long)s->code_ptr); 956 } 957 } 958 #endif 959 960 static void tcg_out_branch(TCGContext *s, int call, tcg_target_long dest) 961 { 962 tcg_target_long disp = dest - (tcg_target_long)s->code_ptr - 5; 963 964 if (disp == (int32_t)disp) { 965 tcg_out_opc(s, call ? OPC_CALL_Jz : OPC_JMP_long, 0, 0, 0); 966 tcg_out32(s, disp); 967 } else { 968 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R10, dest); 969 tcg_out_modrm(s, OPC_GRP5, 970 call ? EXT5_CALLN_Ev : EXT5_JMPN_Ev, TCG_REG_R10); 971 } 972 } 973 974 static inline void tcg_out_calli(TCGContext *s, tcg_target_long dest) 975 { 976 #ifdef VBOX 977 tcg_gen_stack_alignment_check(s); 978 #endif 979 tcg_out_branch(s, 1, dest); 980 } 981 982 static void tcg_out_jmp(TCGContext *s, tcg_target_long dest) 983 { 984 tcg_out_branch(s, 0, dest); 445 985 } 446 986 … … 462 1002 __stq_mmu, 463 1003 }; 464 #endif 465 466 #ifndef CONFIG_USER_ONLY 467 #define GUEST_BASE 0 468 #endif 1004 1005 /* Perform the TLB load and compare. 1006 1007 Inputs: 1008 ADDRLO_IDX contains the index into ARGS of the low part of the 1009 address; the high part of the address is at ADDR_LOW_IDX+1. 1010 1011 MEM_INDEX and S_BITS are the memory context and log2 size of the load. 1012 1013 WHICH is the offset into the CPUTLBEntry structure of the slot to read. 1014 This should be offsetof addr_read or addr_write. 1015 1016 Outputs: 1017 LABEL_PTRS is filled with 1 (32-bit addresses) or 2 (64-bit addresses) 1018 positions of the displacements of forward jumps to the TLB miss case. 1019 1020 First argument register is loaded with the low part of the address. 1021 In the TLB hit case, it has been adjusted as indicated by the TLB 1022 and so is a host address. In the TLB miss case, it continues to 1023 hold a guest address. 1024 1025 Second argument register is clobbered. 
*/ 1026 1027 static inline void tcg_out_tlb_load(TCGContext *s, int addrlo_idx, 1028 int mem_index, int s_bits, 1029 const TCGArg *args, 1030 uint8_t **label_ptr, int which) 1031 { 1032 const int addrlo = args[addrlo_idx]; 1033 const int r0 = tcg_target_call_iarg_regs[0]; 1034 const int r1 = tcg_target_call_iarg_regs[1]; 1035 TCGType type = TCG_TYPE_I32; 1036 int rexw = 0; 1037 1038 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 64) { 1039 type = TCG_TYPE_I64; 1040 rexw = P_REXW; 1041 } 1042 1043 tcg_out_mov(s, type, r1, addrlo); 1044 tcg_out_mov(s, type, r0, addrlo); 1045 1046 tcg_out_shifti(s, SHIFT_SHR + rexw, r1, 1047 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 1048 1049 tgen_arithi(s, ARITH_AND + rexw, r0, 1050 TARGET_PAGE_MASK | ((1 << s_bits) - 1), 0); 1051 tgen_arithi(s, ARITH_AND + rexw, r1, 1052 (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, 0); 1053 1054 tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, r1, TCG_AREG0, r1, 0, 1055 offsetof(CPUState, tlb_table[mem_index][0]) 1056 + which); 1057 1058 /* cmp 0(r1), r0 */ 1059 tcg_out_modrm_offset(s, OPC_CMP_GvEv + rexw, r0, r1, 0); 1060 1061 tcg_out_mov(s, type, r0, addrlo); 1062 1063 /* jne label1 */ 1064 tcg_out8(s, OPC_JCC_short + JCC_JNE); 1065 label_ptr[0] = s->code_ptr; 1066 s->code_ptr++; 1067 1068 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { 1069 /* cmp 4(r1), addrhi */ 1070 tcg_out_modrm_offset(s, OPC_CMP_GvEv, args[addrlo_idx+1], r1, 4); 1071 1072 /* jne label1 */ 1073 tcg_out8(s, OPC_JCC_short + JCC_JNE); 1074 label_ptr[1] = s->code_ptr; 1075 s->code_ptr++; 1076 } 1077 1078 /* TLB Hit. */ 1079 1080 /* add addend(r1), r0 */ 1081 tcg_out_modrm_offset(s, OPC_ADD_GvEv + P_REXW, r0, r1, 1082 offsetof(CPUTLBEntry, addend) - which); 1083 } 1084 #endif 1085 1086 static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo, int datahi, 1087 int base, tcg_target_long ofs, int sizeop) 1088 { 1089 #ifdef TARGET_WORDS_BIGENDIAN 1090 const int bswap = 1; 1091 #else 1092 const int bswap = 0; 1093 #endif 1094 switch (sizeop) { 1095 case 0: 1096 tcg_out_modrm_offset(s, OPC_MOVZBL, datalo, base, ofs); 1097 break; 1098 case 0 | 4: 1099 tcg_out_modrm_offset(s, OPC_MOVSBL + P_REXW, datalo, base, ofs); 1100 break; 1101 case 1: 1102 tcg_out_modrm_offset(s, OPC_MOVZWL, datalo, base, ofs); 1103 if (bswap) { 1104 tcg_out_rolw_8(s, datalo); 1105 } 1106 break; 1107 case 1 | 4: 1108 if (bswap) { 1109 tcg_out_modrm_offset(s, OPC_MOVZWL, datalo, base, ofs); 1110 tcg_out_rolw_8(s, datalo); 1111 tcg_out_modrm(s, OPC_MOVSWL + P_REXW, datalo, datalo); 1112 } else { 1113 tcg_out_modrm_offset(s, OPC_MOVSWL + P_REXW, datalo, base, ofs); 1114 } 1115 break; 1116 case 2: 1117 tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs); 1118 if (bswap) { 1119 tcg_out_bswap32(s, datalo); 1120 } 1121 break; 1122 #if TCG_TARGET_REG_BITS == 64 1123 case 2 | 4: 1124 if (bswap) { 1125 tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs); 1126 tcg_out_bswap32(s, datalo); 1127 tcg_out_ext32s(s, datalo, datalo); 1128 } else { 1129 tcg_out_modrm_offset(s, OPC_MOVSLQ, datalo, base, ofs); 1130 } 1131 break; 1132 #endif 1133 case 3: 1134 if (TCG_TARGET_REG_BITS == 64) { 1135 tcg_out_ld(s, TCG_TYPE_I64, datalo, base, ofs); 1136 if (bswap) { 1137 tcg_out_bswap64(s, datalo); 1138 } 1139 } else { 1140 if (bswap) { 1141 int t = datalo; 1142 datalo = datahi; 1143 datahi = t; 1144 } 1145 if (base != datalo) { 1146 tcg_out_ld(s, TCG_TYPE_I32, datalo, base, ofs); 1147 tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4); 1148 } else { 1149 tcg_out_ld(s, TCG_TYPE_I32, datahi, base, ofs + 4); 1150 tcg_out_ld(s, 
TCG_TYPE_I32, datalo, base, ofs); 1151 } 1152 if (bswap) { 1153 tcg_out_bswap32(s, datalo); 1154 tcg_out_bswap32(s, datahi); 1155 } 1156 } 1157 break; 1158 default: 1159 tcg_abort(); 1160 } 1161 } 469 1162 470 1163 #if defined(VBOX) && defined(REM_PHYS_ADDR_IN_TLB) … … 570 1263 int opc) 571 1264 { 572 int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap; 1265 int data_reg, data_reg2 = 0; 1266 int addrlo_idx; 573 1267 #if defined(CONFIG_SOFTMMU) 574 uint8_t *label1_ptr, *label2_ptr; 575 #endif 576 #if TARGET_LONG_BITS == 64 1268 int mem_index, s_bits, arg_idx; 1269 uint8_t *label_ptr[3]; 1270 #endif 1271 1272 data_reg = args[0]; 1273 addrlo_idx = 1; 1274 if (TCG_TARGET_REG_BITS == 32 && opc == 3) { 1275 data_reg2 = args[1]; 1276 addrlo_idx = 2; 1277 } 1278 577 1279 #if defined(CONFIG_SOFTMMU) 578 uint8_t *label3_ptr; 579 #endif 580 int addr_reg2; 581 #endif 582 583 data_reg = *args++; 584 if (opc == 3) 585 data_reg2 = *args++; 586 else 587 data_reg2 = 0; 588 addr_reg = *args++; 589 #if TARGET_LONG_BITS == 64 590 addr_reg2 = *args++; 591 #endif 592 mem_index = *args; 1280 mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)]; 593 1281 s_bits = opc & 3; 594 1282 595 r0 = TCG_REG_EAX; 596 r1 = TCG_REG_EDX; 597 598 #if defined(CONFIG_SOFTMMU) 599 tcg_out_mov(s, r1, addr_reg); 600 601 tcg_out_mov(s, r0, addr_reg); 602 603 tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */ 604 tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 605 606 tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */ 607 tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); 608 609 tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */ 610 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); 611 612 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */ 613 tcg_out8(s, 0x80 | (r1 << 3) | 0x04); 614 #ifndef VBOX 615 tcg_out8(s, (5 << 3) | r1); 616 #else 617 tcg_out8(s, (TCG_AREG0 << 3) | r1); /* env, not %ebp */ 618 Assert(mem_index >= 0 && mem_index < NB_MMU_MODES); 619 #endif 620 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read)); 621 622 /* cmp 0(r1), r0 */ 623 tcg_out_modrm_offset(s, 0x3b, r0, r1, 0); 624 625 tcg_out_mov(s, r0, addr_reg); 626 627 #if TARGET_LONG_BITS == 32 628 /* je label1 */ 629 tcg_out8(s, 0x70 + JCC_JE); 630 label1_ptr = s->code_ptr; 1283 tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args, 1284 label_ptr, offsetof(CPUTLBEntry, addr_read)); 1285 1286 /* TLB Hit. */ 1287 tcg_out_qemu_ld_direct(s, data_reg, data_reg2, 1288 tcg_target_call_iarg_regs[0], 0, opc); 1289 1290 /* jmp label2 */ 1291 tcg_out8(s, OPC_JMP_short); 1292 label_ptr[2] = s->code_ptr; 631 1293 s->code_ptr++; 632 #else 633 /* jne label3 */ 634 tcg_out8(s, 0x70 + JCC_JNE); 635 label3_ptr = s->code_ptr; 636 s->code_ptr++; 637 638 /* cmp 4(r1), addr_reg2 */ 639 tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4); 640 641 /* je label1 */ 642 tcg_out8(s, 0x70 + JCC_JE); 643 label1_ptr = s->code_ptr; 644 s->code_ptr++; 645 646 /* label3: */ 647 *label3_ptr = s->code_ptr - label3_ptr - 1; 648 #endif 1294 1295 /* TLB Miss. 
*/ 1296 1297 /* label1: */ 1298 *label_ptr[0] = s->code_ptr - label_ptr[0] - 1; 1299 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { 1300 *label_ptr[1] = s->code_ptr - label_ptr[1] - 1; 1301 } 649 1302 650 1303 /* XXX: move that code at the end of the TB */ 651 #if TARGET_LONG_BITS == 32 652 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index); 653 #else 654 tcg_out_mov(s, TCG_REG_EDX, addr_reg2); 655 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index); 656 #endif 657 #ifdef VBOX 658 tcg_gen_stack_alignment_check(s); 659 #endif 660 tcg_out8(s, 0xe8); 661 tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] - 662 (tcg_target_long)s->code_ptr - 4); 1304 /* The first argument is already loaded with addrlo. */ 1305 arg_idx = 1; 1306 if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) { 1307 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx++], 1308 args[addrlo_idx + 1]); 1309 } 1310 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[arg_idx], 1311 mem_index); 1312 tcg_out_calli(s, (tcg_target_long)qemu_ld_helpers[s_bits]); 663 1313 664 1314 switch(opc) { 665 1315 case 0 | 4: 666 /* movsbl */ 667 tcg_out_modrm(s, 0xbe | P_EXT, data_reg, TCG_REG_EAX); 1316 tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW); 668 1317 break; 669 1318 case 1 | 4: 670 /* movswl */ 671 tcg_out_modrm(s, 0xbf | P_EXT, data_reg, TCG_REG_EAX); 1319 tcg_out_ext16s(s, data_reg, TCG_REG_EAX, P_REXW); 672 1320 break; 673 1321 case 0: 674 /* movzbl */ 675 tcg_out_modrm(s, 0xb6 | P_EXT, data_reg, TCG_REG_EAX); 1322 tcg_out_ext8u(s, data_reg, TCG_REG_EAX); 676 1323 break; 677 1324 case 1: 678 /* movzwl */ 679 tcg_out_modrm(s, 0xb7 | P_EXT, data_reg, TCG_REG_EAX); 1325 tcg_out_ext16u(s, data_reg, TCG_REG_EAX); 680 1326 break; 681 1327 case 2: 1328 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); 1329 break; 1330 #if TCG_TARGET_REG_BITS == 64 1331 case 2 | 4: 1332 tcg_out_ext32s(s, data_reg, TCG_REG_EAX); 1333 break; 1334 #endif 1335 case 3: 1336 if (TCG_TARGET_REG_BITS == 64) { 1337 tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX); 1338 } else if (data_reg == TCG_REG_EDX) { 1339 /* xchg %edx, %eax */ 1340 tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0); 1341 tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX); 1342 } else { 1343 tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX); 1344 tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX); 1345 } 1346 break; 682 1347 default: 683 tcg_out_mov(s, data_reg, TCG_REG_EAX); 684 break; 685 case 3: 686 if (data_reg == TCG_REG_EDX) { 687 tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */ 688 tcg_out_mov(s, data_reg2, TCG_REG_EAX); 689 } else { 690 tcg_out_mov(s, data_reg, TCG_REG_EAX); 691 tcg_out_mov(s, data_reg2, TCG_REG_EDX); 692 } 693 break; 694 } 695 696 /* jmp label2 */ 697 tcg_out8(s, 0xeb); 698 label2_ptr = s->code_ptr; 699 s->code_ptr++; 700 701 /* label1: */ 702 *label1_ptr = s->code_ptr - label1_ptr - 1; 703 704 /* add x(r1), r0 */ 705 tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) - 706 offsetof(CPUTLBEntry, addr_read)); 1348 tcg_abort(); 1349 } 1350 1351 /* label2: */ 1352 *label_ptr[2] = s->code_ptr - label_ptr[2] - 1; 707 1353 #else 708 r0 = addr_reg; 709 #endif 710 1354 { 1355 int32_t offset = GUEST_BASE; 1356 int base = args[addrlo_idx]; 1357 1358 if (TCG_TARGET_REG_BITS == 64) { 1359 /* ??? We assume all operations have left us with register 1360 contents that are zero extended. So far this appears to 1361 be true. 
If we want to enforce this, we can either do 1362 an explicit zero-extension here, or (if GUEST_BASE == 0) 1363 use the ADDR32 prefix. For now, do nothing. */ 1364 1365 if (offset != GUEST_BASE) { 1366 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RDI, GUEST_BASE); 1367 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_RDI, base); 1368 base = TCG_REG_RDI, offset = 0; 1369 } 1370 } 1371 1372 tcg_out_qemu_ld_direct(s, data_reg, data_reg2, base, offset, opc); 1373 } 1374 #endif 1375 } 1376 1377 static void tcg_out_qemu_st_direct(TCGContext *s, int datalo, int datahi, 1378 int base, tcg_target_long ofs, int sizeop) 1379 { 711 1380 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 712 1381 #ifdef TARGET_WORDS_BIGENDIAN 713 bswap = 1;1382 const int bswap = 1; 714 1383 #else 715 bswap = 0; 716 #endif 717 switch(opc) { 1384 const int bswap = 0; 1385 #endif 1386 /* ??? Ideally we wouldn't need a scratch register. For user-only, 1387 we could perform the bswap twice to restore the original value 1388 instead of moving to the scratch. But as it is, the L constraint 1389 means that the second argument reg is definitely free here. */ 1390 int scratch = tcg_target_call_iarg_regs[1]; 1391 1392 switch (sizeop) { 718 1393 case 0: 719 /* movzbl */ 720 tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, GUEST_BASE); 721 break; 722 case 0 | 4: 723 /* movsbl */ 724 tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, GUEST_BASE); 1394 tcg_out_modrm_offset(s, OPC_MOVB_EvGv + P_REXB_R, datalo, base, ofs); 725 1395 break; 726 1396 case 1: 727 /* movzwl */728 tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, GUEST_BASE);729 1397 if (bswap) { 730 /* rolw $8, data_reg */ 731 tcg_out8(s, 0x66); 732 tcg_out_modrm(s, 0xc1, 0, data_reg); 733 tcg_out8(s, 8); 734 } 735 break; 736 case 1 | 4: 737 /* movswl */ 738 tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, GUEST_BASE); 1398 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); 1399 tcg_out_rolw_8(s, scratch); 1400 datalo = scratch; 1401 } 1402 tcg_out_modrm_offset(s, OPC_MOVL_EvGv + P_DATA16, datalo, base, ofs); 1403 break; 1404 case 2: 739 1405 if (bswap) { 740 /* rolw $8, data_reg */ 741 tcg_out8(s, 0x66); 742 tcg_out_modrm(s, 0xc1, 0, data_reg); 743 tcg_out8(s, 8); 744 745 /* movswl data_reg, data_reg */ 746 tcg_out_modrm(s, 0xbf | P_EXT, data_reg, data_reg); 747 } 748 break; 749 case 2: 750 /* movl (r0), data_reg */ 751 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE); 752 if (bswap) { 753 /* bswap */ 754 tcg_out_opc(s, (0xc8 + data_reg) | P_EXT); 755 } 1406 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); 1407 tcg_out_bswap32(s, scratch); 1408 datalo = scratch; 1409 } 1410 tcg_out_st(s, TCG_TYPE_I32, datalo, base, ofs); 756 1411 break; 757 1412 case 3: 758 /* XXX: could be nicer */ 759 if (r0 == data_reg) { 760 r1 = TCG_REG_EDX; 761 if (r1 == data_reg) 762 r1 = TCG_REG_EAX; 763 tcg_out_mov(s, r1, r0); 764 r0 = r1; 765 } 766 if (!bswap) { 767 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE); 768 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE + 4); 769 } else { 770 tcg_out_modrm_offset(s, 0x8b, data_reg, r0, GUEST_BASE + 4); 771 tcg_out_opc(s, (0xc8 + data_reg) | P_EXT); 772 773 tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, GUEST_BASE); 774 /* bswap */ 775 tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT); 1413 if (TCG_TARGET_REG_BITS == 64) { 1414 if (bswap) { 1415 tcg_out_mov(s, TCG_TYPE_I64, scratch, datalo); 1416 tcg_out_bswap64(s, scratch); 1417 datalo = scratch; 1418 } 1419 tcg_out_st(s, TCG_TYPE_I64, datalo, base, ofs); 1420 } else if (bswap) { 1421 
tcg_out_mov(s, TCG_TYPE_I32, scratch, datahi); 1422 tcg_out_bswap32(s, scratch); 1423 tcg_out_st(s, TCG_TYPE_I32, scratch, base, ofs); 1424 tcg_out_mov(s, TCG_TYPE_I32, scratch, datalo); 1425 tcg_out_bswap32(s, scratch); 1426 tcg_out_st(s, TCG_TYPE_I32, scratch, base, ofs + 4); 1427 } else { 1428 tcg_out_st(s, TCG_TYPE_I32, datalo, base, ofs); 1429 tcg_out_st(s, TCG_TYPE_I32, datahi, base, ofs + 4); 776 1430 } 777 1431 break; … … 780 1434 } 781 1435 #else /* VBOX */ 1436 # error "broken" 782 1437 tcg_out_vbox_phys_read(s, opc, r0, data_reg, data_reg2); 783 1438 #endif 784 785 786 #if defined(CONFIG_SOFTMMU) 787 /* label2: */ 788 *label2_ptr = s->code_ptr - label2_ptr - 1; 789 # ifdef VBOX 790 Assert((unsigned)(s->code_ptr - label2_ptr - 1) <= 127); 791 # endif 792 #endif 793 } 794 1439 } 795 1440 796 1441 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, 797 1442 int opc) 798 1443 { 799 int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap; 1444 int data_reg, data_reg2 = 0; 1445 int addrlo_idx; 800 1446 #if defined(CONFIG_SOFTMMU) 801 uint8_t *label1_ptr, *label2_ptr; 802 #endif 803 #if TARGET_LONG_BITS == 64 1447 int mem_index, s_bits; 1448 int stack_adjust; 1449 uint8_t *label_ptr[3]; 1450 #endif 1451 1452 data_reg = args[0]; 1453 addrlo_idx = 1; 1454 if (TCG_TARGET_REG_BITS == 32 && opc == 3) { 1455 data_reg2 = args[1]; 1456 addrlo_idx = 2; 1457 } 1458 804 1459 #if defined(CONFIG_SOFTMMU) 805 uint8_t *label3_ptr; 806 #endif 807 int addr_reg2; 808 #endif 809 #ifdef VBOX 810 # ifdef RT_OS_DARWIN 811 int bias1 = 12, bias3 = 4;/** @todo TCG_TARGET_STACK_ALIGN. */ 812 # else 813 int bias1 = 0, bias3 = 0; 814 # endif 815 NOREF(bias3); 816 #endif 817 818 data_reg = *args++; 819 if (opc == 3) 820 data_reg2 = *args++; 821 else 822 data_reg2 = 0; 823 addr_reg = *args++; 824 #if TARGET_LONG_BITS == 64 825 addr_reg2 = *args++; 826 #endif 827 mem_index = *args; 828 1460 mem_index = args[addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)]; 829 1461 s_bits = opc; 830 1462 831 r0 = TCG_REG_EAX; 832 r1 = TCG_REG_EDX; 833 834 #if defined(CONFIG_SOFTMMU) 835 tcg_out_mov(s, r1, addr_reg); 836 837 tcg_out_mov(s, r0, addr_reg); 838 839 tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */ 840 tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 841 842 tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */ 843 tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); 844 845 tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */ 846 tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS); 847 848 tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */ 849 tcg_out8(s, 0x80 | (r1 << 3) | 0x04); 850 #ifndef VBOX 851 tcg_out8(s, (5 << 3) | r1); 852 #else 853 tcg_out8(s, (TCG_AREG0 << 3) | r1); /* env is not %ebp */ 854 Assert(mem_index >= 0 && mem_index < NB_MMU_MODES); 855 #endif 856 tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write)); 857 858 /* cmp 0(r1), r0 */ 859 tcg_out_modrm_offset(s, 0x3b, r0, r1, 0); 860 861 tcg_out_mov(s, r0, addr_reg); 862 863 #if TARGET_LONG_BITS == 32 864 /* je label1 */ 865 tcg_out8(s, 0x70 + JCC_JE); 866 label1_ptr = s->code_ptr; 1463 tcg_out_tlb_load(s, addrlo_idx, mem_index, s_bits, args, 1464 label_ptr, offsetof(CPUTLBEntry, addr_write)); 1465 1466 /* TLB Hit. 
*/ 1467 tcg_out_qemu_st_direct(s, data_reg, data_reg2, 1468 tcg_target_call_iarg_regs[0], 0, opc); 1469 1470 /* jmp label2 */ 1471 tcg_out8(s, OPC_JMP_short); 1472 label_ptr[2] = s->code_ptr; 867 1473 s->code_ptr++; 868 #else 869 /* jne label3 */ 870 tcg_out8(s, 0x70 + JCC_JNE); 871 label3_ptr = s->code_ptr; 872 s->code_ptr++; 873 874 /* cmp 4(r1), addr_reg2 */ 875 tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4); 876 877 /* je label1 */ 878 tcg_out8(s, 0x70 + JCC_JE); 879 label1_ptr = s->code_ptr; 880 s->code_ptr++; 881 882 /* label3: */ 883 *label3_ptr = s->code_ptr - label3_ptr - 1; 884 #endif 1474 1475 /* TLB Miss. */ 1476 1477 /* label1: */ 1478 *label_ptr[0] = s->code_ptr - label_ptr[0] - 1; 1479 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) { 1480 *label_ptr[1] = s->code_ptr - label_ptr[1] - 1; 1481 } 1482 1483 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 885 1484 886 1485 /* XXX: move that code at the end of the TB */ 887 #if TARGET_LONG_BITS == 32 888 if (opc == 3) { 889 tcg_out_mov(s, TCG_REG_EDX, data_reg); 890 tcg_out_mov(s, TCG_REG_ECX, data_reg2); 891 #ifdef VBOX 892 tcg_out_subi(s, TCG_REG_ESP, bias1); 893 #endif 894 tcg_out8(s, 0x6a); /* push Ib */ 895 tcg_out8(s, mem_index); 896 # ifdef VBOX 897 tcg_gen_stack_alignment_check(s); 898 # endif 899 tcg_out8(s, 0xe8); 900 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] - 901 (tcg_target_long)s->code_ptr - 4); 902 #ifdef VBOX 903 tcg_out_addi(s, TCG_REG_ESP, 4+bias1); 904 #else 905 tcg_out_addi(s, TCG_REG_ESP, 4); 906 #endif 1486 if (TCG_TARGET_REG_BITS == 64) { 1487 tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32), 1488 TCG_REG_RSI, data_reg); 1489 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_RDX, mem_index); 1490 stack_adjust = 0; 1491 } else if (TARGET_LONG_BITS == 32) { 1492 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, data_reg); 1493 if (opc == 3) { 1494 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg2); 1495 tcg_out_pushi(s, mem_index); 1496 stack_adjust = 4; 1497 } else { 1498 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index); 1499 stack_adjust = 0; 1500 } 907 1501 } else { 908 switch(opc) { 909 case 0: 910 /* movzbl */ 911 tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_EDX, data_reg); 912 break; 913 case 1: 914 /* movzwl */ 915 tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_EDX, data_reg); 916 break; 917 case 2: 918 tcg_out_mov(s, TCG_REG_EDX, data_reg); 919 break; 920 } 921 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index); 922 # ifdef VBOX 923 tcg_gen_stack_alignment_check(s); 924 # endif 925 tcg_out8(s, 0xe8); 926 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] - 927 (tcg_target_long)s->code_ptr - 4); 928 } 929 #else 930 if (opc == 3) { 931 tcg_out_mov(s, TCG_REG_EDX, addr_reg2); 932 # ifdef VBOX 933 tcg_out_subi(s, TCG_REG_ESP, bias3); 934 # endif 935 tcg_out8(s, 0x6a); /* push Ib */ 936 tcg_out8(s, mem_index); 937 tcg_out_opc(s, 0x50 + data_reg2); /* push */ 938 tcg_out_opc(s, 0x50 + data_reg); /* push */ 939 # ifdef VBOX 940 tcg_gen_stack_alignment_check(s); 941 # endif 942 tcg_out8(s, 0xe8); 943 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] - 944 (tcg_target_long)s->code_ptr - 4); 945 #ifdef VBOX 946 tcg_out_addi(s, TCG_REG_ESP, 12+bias3); 947 #else 948 tcg_out_addi(s, TCG_REG_ESP, 12); 949 #endif 950 } else { 951 tcg_out_mov(s, TCG_REG_EDX, addr_reg2); 952 switch(opc) { 953 case 0: 954 /* movzbl */ 955 tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_ECX, data_reg); 956 break; 957 case 1: 958 /* movzwl */ 959 tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_ECX, data_reg); 960 break; 961 case 2: 962 tcg_out_mov(s, 
TCG_REG_ECX, data_reg); 963 break; 964 } 965 # ifdef VBOX 966 tcg_out_subi(s, TCG_REG_ESP, bias1); 967 # endif 968 tcg_out8(s, 0x6a); /* push Ib */ 969 tcg_out8(s, mem_index); 970 # ifdef VBOX 971 tcg_gen_stack_alignment_check(s); 972 # endif 973 tcg_out8(s, 0xe8); 974 tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] - 975 (tcg_target_long)s->code_ptr - 4); 976 # if defined(VBOX) 977 tcg_out_addi(s, TCG_REG_ESP, 4 + bias1); 978 # else 979 tcg_out_addi(s, TCG_REG_ESP, 4); 980 # endif 981 } 982 #endif 983 984 /* jmp label2 */ 985 tcg_out8(s, 0xeb); 986 label2_ptr = s->code_ptr; 987 s->code_ptr++; 988 989 /* label1: */ 990 *label1_ptr = s->code_ptr - label1_ptr - 1; 991 992 /* add x(r1), r0 */ 993 tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) - 994 offsetof(CPUTLBEntry, addr_write)); 995 #else 996 r0 = addr_reg; 997 #endif 998 999 #if !defined(VBOX) || !defined(REM_PHYS_ADDR_IN_TLB) 1000 #ifdef TARGET_WORDS_BIGENDIAN 1001 bswap = 1; 1002 #else 1003 bswap = 0; 1004 #endif 1005 switch(opc) { 1006 case 0: 1007 /* movb */ 1008 tcg_out_modrm_offset(s, 0x88, data_reg, r0, GUEST_BASE); 1009 break; 1010 case 1: 1011 if (bswap) { 1012 tcg_out_mov(s, r1, data_reg); 1013 tcg_out8(s, 0x66); /* rolw $8, %ecx */ 1014 tcg_out_modrm(s, 0xc1, 0, r1); 1015 tcg_out8(s, 8); 1016 data_reg = r1; 1017 } 1018 /* movw */ 1019 tcg_out8(s, 0x66); 1020 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE); 1021 break; 1022 case 2: 1023 if (bswap) { 1024 tcg_out_mov(s, r1, data_reg); 1025 /* bswap data_reg */ 1026 tcg_out_opc(s, (0xc8 + r1) | P_EXT); 1027 data_reg = r1; 1028 } 1029 /* movl */ 1030 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE); 1031 break; 1032 case 3: 1033 if (bswap) { 1034 tcg_out_mov(s, r1, data_reg2); 1035 /* bswap data_reg */ 1036 tcg_out_opc(s, (0xc8 + r1) | P_EXT); 1037 tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE); 1038 tcg_out_mov(s, r1, data_reg); 1039 /* bswap data_reg */ 1040 tcg_out_opc(s, (0xc8 + r1) | P_EXT); 1041 tcg_out_modrm_offset(s, 0x89, r1, r0, GUEST_BASE + 4); 1042 } else { 1043 tcg_out_modrm_offset(s, 0x89, data_reg, r0, GUEST_BASE); 1044 tcg_out_modrm_offset(s, 0x89, data_reg2, r0, GUEST_BASE + 4); 1045 } 1046 break; 1047 default: 1048 tcg_abort(); 1049 } 1502 if (opc == 3) { 1503 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]); 1504 tcg_out_pushi(s, mem_index); 1505 tcg_out_push(s, data_reg2); 1506 tcg_out_push(s, data_reg); 1507 stack_adjust = 12; 1508 } else { 1509 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_EDX, args[addrlo_idx + 1]); 1510 switch(opc) { 1511 case 0: 1512 tcg_out_ext8u(s, TCG_REG_ECX, data_reg); 1513 break; 1514 case 1: 1515 tcg_out_ext16u(s, TCG_REG_ECX, data_reg); 1516 break; 1517 case 2: 1518 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_ECX, data_reg); 1519 break; 1520 } 1521 tcg_out_pushi(s, mem_index); 1522 stack_adjust = 4; 1523 } 1524 } 1525 1526 tcg_out_calli(s, (tcg_target_long)qemu_st_helpers[s_bits]); 1527 1528 if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) { 1529 /* Pop and discard. This is 2 bytes smaller than the add. 
*/ 1530 tcg_out_pop(s, TCG_REG_ECX); 1531 } else if (stack_adjust != 0) { 1532 tcg_out_addi(s, TCG_REG_ESP, stack_adjust); 1533 } 1534 1050 1535 #else /* VBOX && REM_PHYS_ADDR_IN_TLB */ 1536 # error Borked 1051 1537 tcg_out_vbox_phys_write(s, opc, r0, data_reg, data_reg2); 1052 1538 #endif /* VBOX && REM_PHYS_ADDR_IN_TLB */ 1053 1539 1054 #if defined(CONFIG_SOFTMMU)1055 1540 /* label2: */ 1056 *label2_ptr = s->code_ptr - label2_ptr - 1; 1057 # ifdef VBOX 1058 Assert((unsigned)(s->code_ptr - label2_ptr - 1) <= 127); 1059 # endif 1060 #endif 1061 } 1062 1063 static inline void tcg_out_op(TCGContext *s, int opc, 1541 *label_ptr[2] = s->code_ptr - label_ptr[2] - 1; 1542 #else 1543 { 1544 int32_t offset = GUEST_BASE; 1545 int base = args[addrlo_idx]; 1546 1547 if (TCG_TARGET_REG_BITS == 64) { 1548 /* ??? We assume all operations have left us with register 1549 contents that are zero extended. So far this appears to 1550 be true. If we want to enforce this, we can either do 1551 an explicit zero-extension here, or (if GUEST_BASE == 0) 1552 use the ADDR32 prefix. For now, do nothing. */ 1553 1554 if (offset != GUEST_BASE) { 1555 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_RDI, GUEST_BASE); 1556 tgen_arithr(s, ARITH_ADD + P_REXW, TCG_REG_RDI, base); 1557 base = TCG_REG_RDI, offset = 0; 1558 } 1559 } 1560 1561 tcg_out_qemu_st_direct(s, data_reg, data_reg2, base, offset, opc); 1562 } 1563 #endif 1564 } 1565 1566 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, 1064 1567 const TCGArg *args, const int *const_args) 1065 1568 { 1066 int c; 1569 int c, rexw = 0; 1570 1571 #if TCG_TARGET_REG_BITS == 64 1572 # define OP_32_64(x) \ 1573 case glue(glue(INDEX_op_, x), _i64): \ 1574 rexw = P_REXW; /* FALLTHRU */ \ 1575 case glue(glue(INDEX_op_, x), _i32) 1576 #else 1577 # define OP_32_64(x) \ 1578 case glue(glue(INDEX_op_, x), _i32) 1579 #endif 1067 1580 1068 1581 switch(opc) { 1069 1582 case INDEX_op_exit_tb: 1070 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]); 1071 tcg_out8(s, 0xe9); /* jmp tb_ret_addr */ 1072 tcg_out32(s, tb_ret_addr - s->code_ptr - 4); 1583 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_EAX, args[0]); 1584 tcg_out_jmp(s, (tcg_target_long) tb_ret_addr); 1073 1585 break; 1074 1586 case INDEX_op_goto_tb: 1075 1587 if (s->tb_jmp_offset) { 1076 1588 /* direct jump method */ 1077 tcg_out8(s, 0xe9); /* jmp im */1589 tcg_out8(s, OPC_JMP_long); /* jmp im */ 1078 1590 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf; 1079 1591 tcg_out32(s, 0); 1080 1592 } else { 1081 1593 /* indirect jump method */ 1082 /* jmp Ev */ 1083 tcg_out_modrm_offset(s, 0xff, 4, -1, 1594 tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1, 1084 1595 (tcg_target_long)(s->tb_next + args[0])); 1085 1596 } … … 1087 1598 break; 1088 1599 case INDEX_op_call: 1089 #ifdef VBOX1090 tcg_gen_stack_alignment_check(s);1091 #endif1092 1600 if (const_args[0]) { 1093 tcg_out 8(s, 0xe8);1094 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);1095 } else {1096 tcg_out_modrm(s, 0xff, 2, args[0]);1601 tcg_out_calli(s, args[0]); 1602 } else { 1603 /* call *reg */ 1604 tcg_out_modrm(s, OPC_GRP5, EXT5_CALLN_Ev, args[0]); 1097 1605 } 1098 1606 break; 1099 1607 case INDEX_op_jmp: 1100 1608 if (const_args[0]) { 1101 tcg_out 8(s, 0xe9);1102 tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);1103 } else {1104 tcg_out_modrm(s, 0xff, 4, args[0]);1609 tcg_out_jmp(s, args[0]); 1610 } else { 1611 /* jmp *reg */ 1612 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, args[0]); 1105 1613 } 1106 1614 break; 1107 1615 case INDEX_op_br: 1108 
tcg_out_jxx(s, JCC_JMP, args[0] );1616 tcg_out_jxx(s, JCC_JMP, args[0], 0); 1109 1617 break; 1110 1618 case INDEX_op_movi_i32: 1111 1619 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]); 1112 1620 break; 1113 case INDEX_op_ld8u_i32: 1114 /* movzbl */ 1115 tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]); 1116 break; 1117 case INDEX_op_ld8s_i32: 1118 /* movsbl */ 1119 tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]); 1120 break; 1121 case INDEX_op_ld16u_i32: 1122 /* movzwl */ 1123 tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]); 1124 break; 1125 case INDEX_op_ld16s_i32: 1126 /* movswl */ 1127 tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]); 1128 break; 1621 OP_32_64(ld8u): 1622 /* Note that we can ignore REXW for the zero-extend to 64-bit. */ 1623 tcg_out_modrm_offset(s, OPC_MOVZBL, args[0], args[1], args[2]); 1624 break; 1625 OP_32_64(ld8s): 1626 tcg_out_modrm_offset(s, OPC_MOVSBL + rexw, args[0], args[1], args[2]); 1627 break; 1628 OP_32_64(ld16u): 1629 /* Note that we can ignore REXW for the zero-extend to 64-bit. */ 1630 tcg_out_modrm_offset(s, OPC_MOVZWL, args[0], args[1], args[2]); 1631 break; 1632 OP_32_64(ld16s): 1633 tcg_out_modrm_offset(s, OPC_MOVSWL + rexw, args[0], args[1], args[2]); 1634 break; 1635 #if TCG_TARGET_REG_BITS == 64 1636 case INDEX_op_ld32u_i64: 1637 #endif 1129 1638 case INDEX_op_ld_i32: 1130 /* movl */ 1131 tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]); 1132 break; 1133 case INDEX_op_st8_i32: 1134 /* movb */ 1135 tcg_out_modrm_offset(s, 0x88, args[0], args[1], args[2]); 1136 break; 1137 case INDEX_op_st16_i32: 1138 /* movw */ 1139 tcg_out8(s, 0x66); 1140 tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]); 1141 break; 1639 tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]); 1640 break; 1641 1642 OP_32_64(st8): 1643 tcg_out_modrm_offset(s, OPC_MOVB_EvGv | P_REXB_R, 1644 args[0], args[1], args[2]); 1645 break; 1646 OP_32_64(st16): 1647 tcg_out_modrm_offset(s, OPC_MOVL_EvGv | P_DATA16, 1648 args[0], args[1], args[2]); 1649 break; 1650 #if TCG_TARGET_REG_BITS == 64 1651 case INDEX_op_st32_i64: 1652 #endif 1142 1653 case INDEX_op_st_i32: 1143 /* movl */ 1144 tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]); 1145 break; 1146 case INDEX_op_sub_i32: 1654 tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]); 1655 break; 1656 1657 OP_32_64(add): 1658 /* For 3-operand addition, use LEA. */ 1659 if (args[0] != args[1]) { 1660 TCGArg a0 = args[0], a1 = args[1], a2 = args[2], c3 = 0; 1661 1662 if (const_args[2]) { 1663 c3 = a2, a2 = -1; 1664 } else if (a0 == a2) { 1665 /* Watch out for dest = src + dest, since we've removed 1666 the matching constraint on the add. 
*/ 1667 tgen_arithr(s, ARITH_ADD + rexw, a0, a1); 1668 break; 1669 } 1670 1671 tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a2, 0, c3); 1672 break; 1673 } 1674 c = ARITH_ADD; 1675 goto gen_arith; 1676 OP_32_64(sub): 1147 1677 c = ARITH_SUB; 1148 1678 goto gen_arith; 1149 case INDEX_op_and_i32:1679 OP_32_64(and): 1150 1680 c = ARITH_AND; 1151 1681 goto gen_arith; 1152 case INDEX_op_or_i32:1682 OP_32_64(or): 1153 1683 c = ARITH_OR; 1154 1684 goto gen_arith; 1155 case INDEX_op_xor_i32:1685 OP_32_64(xor): 1156 1686 c = ARITH_XOR; 1157 1687 goto gen_arith; 1158 case INDEX_op_add_i32:1159 c = ARITH_ADD;1160 1688 gen_arith: 1161 1689 if (const_args[2]) { 1162 tgen_arithi(s, c, args[0], args[2], 0); 1163 } else { 1164 tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]); 1165 } 1166 break; 1167 case INDEX_op_mul_i32: 1690 tgen_arithi(s, c + rexw, args[0], args[2], 0); 1691 } else { 1692 tgen_arithr(s, c + rexw, args[0], args[2]); 1693 } 1694 break; 1695 1696 OP_32_64(mul): 1168 1697 if (const_args[2]) { 1169 1698 int32_t val; 1170 1699 val = args[2]; 1171 1700 if (val == (int8_t)val) { 1172 tcg_out_modrm(s, 0x6b, args[0], args[0]);1701 tcg_out_modrm(s, OPC_IMUL_GvEvIb + rexw, args[0], args[0]); 1173 1702 tcg_out8(s, val); 1174 1703 } else { 1175 tcg_out_modrm(s, 0x69, args[0], args[0]);1704 tcg_out_modrm(s, OPC_IMUL_GvEvIz + rexw, args[0], args[0]); 1176 1705 tcg_out32(s, val); 1177 1706 } 1178 1707 } else { 1179 tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]); 1180 } 1181 break; 1182 case INDEX_op_mulu2_i32: 1183 tcg_out_modrm(s, 0xf7, 4, args[3]); 1184 break; 1185 case INDEX_op_div2_i32: 1186 tcg_out_modrm(s, 0xf7, 7, args[4]); 1187 break; 1188 case INDEX_op_divu2_i32: 1189 tcg_out_modrm(s, 0xf7, 6, args[4]); 1190 break; 1191 case INDEX_op_shl_i32: 1708 tcg_out_modrm(s, OPC_IMUL_GvEv + rexw, args[0], args[2]); 1709 } 1710 break; 1711 1712 OP_32_64(div2): 1713 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_IDIV, args[4]); 1714 break; 1715 OP_32_64(divu2): 1716 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_DIV, args[4]); 1717 break; 1718 1719 OP_32_64(shl): 1192 1720 c = SHIFT_SHL; 1193 gen_shift32: 1721 goto gen_shift; 1722 OP_32_64(shr): 1723 c = SHIFT_SHR; 1724 goto gen_shift; 1725 OP_32_64(sar): 1726 c = SHIFT_SAR; 1727 goto gen_shift; 1728 OP_32_64(rotl): 1729 c = SHIFT_ROL; 1730 goto gen_shift; 1731 OP_32_64(rotr): 1732 c = SHIFT_ROR; 1733 goto gen_shift; 1734 gen_shift: 1194 1735 if (const_args[2]) { 1195 if (args[2] == 1) { 1196 tcg_out_modrm(s, 0xd1, c, args[0]); 1197 } else { 1198 tcg_out_modrm(s, 0xc1, c, args[0]); 1199 tcg_out8(s, args[2]); 1200 } 1201 } else { 1202 tcg_out_modrm(s, 0xd3, c, args[0]); 1203 } 1204 break; 1205 case INDEX_op_shr_i32: 1206 c = SHIFT_SHR; 1207 goto gen_shift32; 1208 case INDEX_op_sar_i32: 1209 c = SHIFT_SAR; 1210 goto gen_shift32; 1211 case INDEX_op_rotl_i32: 1212 c = SHIFT_ROL; 1213 goto gen_shift32; 1214 case INDEX_op_rotr_i32: 1215 c = SHIFT_ROR; 1216 goto gen_shift32; 1217 1218 case INDEX_op_add2_i32: 1219 if (const_args[4]) 1220 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1); 1221 else 1222 tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]); 1223 if (const_args[5]) 1224 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1); 1225 else 1226 tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]); 1227 break; 1228 case INDEX_op_sub2_i32: 1229 if (const_args[4]) 1230 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1); 1231 else 1232 tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]); 1233 if (const_args[5]) 1234 tgen_arithi(s, ARITH_SBB, args[1], 
args[5], 1); 1235 else 1236 tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]); 1237 break; 1736 tcg_out_shifti(s, c + rexw, args[0], args[2]); 1737 } else { 1738 tcg_out_modrm(s, OPC_SHIFT_cl + rexw, c, args[0]); 1739 } 1740 break; 1741 1238 1742 case INDEX_op_brcond_i32: 1239 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]); 1240 break; 1241 case INDEX_op_brcond2_i32: 1242 tcg_out_brcond2(s, args, const_args); 1243 break; 1244 1245 case INDEX_op_bswap16_i32: 1246 tcg_out8(s, 0x66); 1247 tcg_out_modrm(s, 0xc1, SHIFT_ROL, args[0]); 1248 tcg_out8(s, 8); 1249 break; 1250 case INDEX_op_bswap32_i32: 1251 tcg_out_opc(s, (0xc8 + args[0]) | P_EXT); 1252 break; 1253 1254 case INDEX_op_neg_i32: 1255 tcg_out_modrm(s, 0xf7, 3, args[0]); 1256 break; 1257 1258 case INDEX_op_not_i32: 1259 tcg_out_modrm(s, 0xf7, 2, args[0]); 1260 break; 1261 1262 case INDEX_op_ext8s_i32: 1263 tcg_out_modrm(s, 0xbe | P_EXT, args[0], args[1]); 1264 break; 1265 case INDEX_op_ext16s_i32: 1266 tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]); 1267 break; 1268 case INDEX_op_ext8u_i32: 1269 tcg_out_modrm(s, 0xb6 | P_EXT, args[0], args[1]); 1270 break; 1271 case INDEX_op_ext16u_i32: 1272 tcg_out_modrm(s, 0xb7 | P_EXT, args[0], args[1]); 1743 tcg_out_brcond32(s, args[2], args[0], args[1], const_args[1], 1744 args[3], 0); 1745 break; 1746 case INDEX_op_setcond_i32: 1747 tcg_out_setcond32(s, args[3], args[0], args[1], 1748 args[2], const_args[2]); 1749 break; 1750 1751 OP_32_64(bswap16): 1752 tcg_out_rolw_8(s, args[0]); 1753 break; 1754 OP_32_64(bswap32): 1755 tcg_out_bswap32(s, args[0]); 1756 break; 1757 1758 OP_32_64(neg): 1759 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NEG, args[0]); 1760 break; 1761 OP_32_64(not): 1762 tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_NOT, args[0]); 1763 break; 1764 1765 OP_32_64(ext8s): 1766 tcg_out_ext8s(s, args[0], args[1], rexw); 1767 break; 1768 OP_32_64(ext16s): 1769 tcg_out_ext16s(s, args[0], args[1], rexw); 1770 break; 1771 OP_32_64(ext8u): 1772 tcg_out_ext8u(s, args[0], args[1]); 1773 break; 1774 OP_32_64(ext16u): 1775 tcg_out_ext16u(s, args[0], args[1]); 1273 1776 break; 1274 1777 … … 1285 1788 tcg_out_qemu_ld(s, args, 1 | 4); 1286 1789 break; 1790 #if TCG_TARGET_REG_BITS == 64 1287 1791 case INDEX_op_qemu_ld32u: 1792 #endif 1793 case INDEX_op_qemu_ld32: 1288 1794 tcg_out_qemu_ld(s, args, 2); 1289 1795 break; … … 1305 1811 break; 1306 1812 1813 #if TCG_TARGET_REG_BITS == 32 1814 case INDEX_op_brcond2_i32: 1815 tcg_out_brcond2(s, args, const_args, 0); 1816 break; 1817 case INDEX_op_setcond2_i32: 1818 tcg_out_setcond2(s, args, const_args); 1819 break; 1820 case INDEX_op_mulu2_i32: 1821 tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_MUL, args[3]); 1822 break; 1823 case INDEX_op_add2_i32: 1824 if (const_args[4]) { 1825 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1); 1826 } else { 1827 tgen_arithr(s, ARITH_ADD, args[0], args[4]); 1828 } 1829 if (const_args[5]) { 1830 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1); 1831 } else { 1832 tgen_arithr(s, ARITH_ADC, args[1], args[5]); 1833 } 1834 break; 1835 case INDEX_op_sub2_i32: 1836 if (const_args[4]) { 1837 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1); 1838 } else { 1839 tgen_arithr(s, ARITH_SUB, args[0], args[4]); 1840 } 1841 if (const_args[5]) { 1842 tgen_arithi(s, ARITH_SBB, args[1], args[5], 1); 1843 } else { 1844 tgen_arithr(s, ARITH_SBB, args[1], args[5]); 1845 } 1846 break; 1847 #else /* TCG_TARGET_REG_BITS == 64 */ 1848 case INDEX_op_movi_i64: 1849 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]); 1850 break; 1851 case 
INDEX_op_ld32s_i64: 1852 tcg_out_modrm_offset(s, OPC_MOVSLQ, args[0], args[1], args[2]); 1853 break; 1854 case INDEX_op_ld_i64: 1855 tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]); 1856 break; 1857 case INDEX_op_st_i64: 1858 tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]); 1859 break; 1860 case INDEX_op_qemu_ld32s: 1861 tcg_out_qemu_ld(s, args, 2 | 4); 1862 break; 1863 1864 case INDEX_op_brcond_i64: 1865 tcg_out_brcond64(s, args[2], args[0], args[1], const_args[1], 1866 args[3], 0); 1867 break; 1868 case INDEX_op_setcond_i64: 1869 tcg_out_setcond64(s, args[3], args[0], args[1], 1870 args[2], const_args[2]); 1871 break; 1872 1873 case INDEX_op_bswap64_i64: 1874 tcg_out_bswap64(s, args[0]); 1875 break; 1876 case INDEX_op_ext32u_i64: 1877 tcg_out_ext32u(s, args[0], args[1]); 1878 break; 1879 case INDEX_op_ext32s_i64: 1880 tcg_out_ext32s(s, args[0], args[1]); 1881 break; 1882 #endif 1883 1307 1884 default: 1308 1885 tcg_abort(); 1309 1886 } 1887 1888 #undef OP_32_64 1310 1889 } 1311 1890 1312 1891 static const TCGTargetOpDef x86_op_defs[] = { 1313 { INDEX_op_exit_tb, { "", ""} },1314 { INDEX_op_goto_tb, { "", ""} },1315 { INDEX_op_call, { "ri" , "",} },1316 { INDEX_op_jmp, { "ri" , ""} },1317 { INDEX_op_br, { "", ""} },1892 { INDEX_op_exit_tb, { } }, 1893 { INDEX_op_goto_tb, { } }, 1894 { INDEX_op_call, { "ri" } }, 1895 { INDEX_op_jmp, { "ri" } }, 1896 { INDEX_op_br, { } }, 1318 1897 { INDEX_op_mov_i32, { "r", "r" } }, 1319 1898 { INDEX_op_movi_i32, { "r" } }, … … 1327 1906 { INDEX_op_st_i32, { "r", "r" } }, 1328 1907 1329 { INDEX_op_add_i32, { "r", " 0", "ri" } },1908 { INDEX_op_add_i32, { "r", "r", "ri" } }, 1330 1909 { INDEX_op_sub_i32, { "r", "0", "ri" } }, 1331 1910 { INDEX_op_mul_i32, { "r", "0", "ri" } }, 1332 { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } },1333 1911 { INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } }, 1334 1912 { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } }, … … 1345 1923 { INDEX_op_brcond_i32, { "r", "ri" } }, 1346 1924 1925 { INDEX_op_bswap16_i32, { "r", "0" } }, 1926 { INDEX_op_bswap32_i32, { "r", "0" } }, 1927 1928 { INDEX_op_neg_i32, { "r", "0" } }, 1929 1930 { INDEX_op_not_i32, { "r", "0" } }, 1931 1932 { INDEX_op_ext8s_i32, { "r", "q" } }, 1933 { INDEX_op_ext16s_i32, { "r", "r" } }, 1934 { INDEX_op_ext8u_i32, { "r", "q" } }, 1935 { INDEX_op_ext16u_i32, { "r", "r" } }, 1936 1937 { INDEX_op_setcond_i32, { "q", "r", "ri" } }, 1938 1939 #if TCG_TARGET_REG_BITS == 32 1940 { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } }, 1347 1941 { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } }, 1348 1942 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } }, 1349 1943 { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, 1350 1351 { INDEX_op_bswap16_i32, { "r", "0" } }, 1352 { INDEX_op_bswap32_i32, { "r", "0" } }, 1353 1354 { INDEX_op_neg_i32, { "r", "0" } }, 1355 1356 { INDEX_op_not_i32, { "r", "0" } }, 1357 1358 { INDEX_op_ext8s_i32, { "r", "q" } }, 1359 { INDEX_op_ext16s_i32, { "r", "r" } }, 1360 { INDEX_op_ext8u_i32, { "r", "q"} }, 1361 { INDEX_op_ext16u_i32, { "r", "r"} }, 1362 1363 #if TARGET_LONG_BITS == 32 1944 { INDEX_op_setcond2_i32, { "r", "r", "r", "ri", "ri" } }, 1945 #else 1946 { INDEX_op_mov_i64, { "r", "r" } }, 1947 { INDEX_op_movi_i64, { "r" } }, 1948 { INDEX_op_ld8u_i64, { "r", "r" } }, 1949 { INDEX_op_ld8s_i64, { "r", "r" } }, 1950 { INDEX_op_ld16u_i64, { "r", "r" } }, 1951 { INDEX_op_ld16s_i64, { "r", "r" } }, 1952 { INDEX_op_ld32u_i64, { "r", "r" } }, 1953 { INDEX_op_ld32s_i64, { "r", "r" } }, 1954 { INDEX_op_ld_i64, { "r", "r" } }, 
1955 { INDEX_op_st8_i64, { "r", "r" } }, 1956 { INDEX_op_st16_i64, { "r", "r" } }, 1957 { INDEX_op_st32_i64, { "r", "r" } }, 1958 { INDEX_op_st_i64, { "r", "r" } }, 1959 1960 { INDEX_op_add_i64, { "r", "0", "re" } }, 1961 { INDEX_op_mul_i64, { "r", "0", "re" } }, 1962 { INDEX_op_div2_i64, { "a", "d", "0", "1", "r" } }, 1963 { INDEX_op_divu2_i64, { "a", "d", "0", "1", "r" } }, 1964 { INDEX_op_sub_i64, { "r", "0", "re" } }, 1965 { INDEX_op_and_i64, { "r", "0", "reZ" } }, 1966 { INDEX_op_or_i64, { "r", "0", "re" } }, 1967 { INDEX_op_xor_i64, { "r", "0", "re" } }, 1968 1969 { INDEX_op_shl_i64, { "r", "0", "ci" } }, 1970 { INDEX_op_shr_i64, { "r", "0", "ci" } }, 1971 { INDEX_op_sar_i64, { "r", "0", "ci" } }, 1972 { INDEX_op_rotl_i64, { "r", "0", "ci" } }, 1973 { INDEX_op_rotr_i64, { "r", "0", "ci" } }, 1974 1975 { INDEX_op_brcond_i64, { "r", "re" } }, 1976 { INDEX_op_setcond_i64, { "r", "r", "re" } }, 1977 1978 { INDEX_op_bswap16_i64, { "r", "0" } }, 1979 { INDEX_op_bswap32_i64, { "r", "0" } }, 1980 { INDEX_op_bswap64_i64, { "r", "0" } }, 1981 { INDEX_op_neg_i64, { "r", "0" } }, 1982 { INDEX_op_not_i64, { "r", "0" } }, 1983 1984 { INDEX_op_ext8s_i64, { "r", "r" } }, 1985 { INDEX_op_ext16s_i64, { "r", "r" } }, 1986 { INDEX_op_ext32s_i64, { "r", "r" } }, 1987 { INDEX_op_ext8u_i64, { "r", "r" } }, 1988 { INDEX_op_ext16u_i64, { "r", "r" } }, 1989 { INDEX_op_ext32u_i64, { "r", "r" } }, 1990 #endif 1991 1992 #if TCG_TARGET_REG_BITS == 64 1364 1993 { INDEX_op_qemu_ld8u, { "r", "L" } }, 1365 1994 { INDEX_op_qemu_ld8s, { "r", "L" } }, 1366 1995 { INDEX_op_qemu_ld16u, { "r", "L" } }, 1367 1996 { INDEX_op_qemu_ld16s, { "r", "L" } }, 1997 { INDEX_op_qemu_ld32, { "r", "L" } }, 1368 1998 { INDEX_op_qemu_ld32u, { "r", "L" } }, 1999 { INDEX_op_qemu_ld32s, { "r", "L" } }, 2000 { INDEX_op_qemu_ld64, { "r", "L" } }, 2001 2002 { INDEX_op_qemu_st8, { "L", "L" } }, 2003 { INDEX_op_qemu_st16, { "L", "L" } }, 2004 { INDEX_op_qemu_st32, { "L", "L" } }, 2005 { INDEX_op_qemu_st64, { "L", "L" } }, 2006 #elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS 2007 { INDEX_op_qemu_ld8u, { "r", "L" } }, 2008 { INDEX_op_qemu_ld8s, { "r", "L" } }, 2009 { INDEX_op_qemu_ld16u, { "r", "L" } }, 2010 { INDEX_op_qemu_ld16s, { "r", "L" } }, 2011 { INDEX_op_qemu_ld32, { "r", "L" } }, 1369 2012 { INDEX_op_qemu_ld64, { "r", "r", "L" } }, 1370 2013 … … 1378 2021 { INDEX_op_qemu_ld16u, { "r", "L", "L" } }, 1379 2022 { INDEX_op_qemu_ld16s, { "r", "L", "L" } }, 1380 { INDEX_op_qemu_ld32 u, { "r", "L", "L" } },2023 { INDEX_op_qemu_ld32, { "r", "L", "L" } }, 1381 2024 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } }, 1382 2025 … … 1386 2029 { INDEX_op_qemu_st64, { "L", "L", "L", "L" } }, 1387 2030 #endif 1388 #ifndef VBOX1389 2031 { -1 }, 2032 }; 2033 2034 static int tcg_target_callee_save_regs[] = { 2035 #if TCG_TARGET_REG_BITS == 64 2036 TCG_REG_RBP, 2037 TCG_REG_RBX, 2038 TCG_REG_R12, 2039 TCG_REG_R13, 2040 /* TCG_REG_R14, */ /* Currently used for the global env. */ 2041 TCG_REG_R15, 1390 2042 #else 1391 { -1, {"", "", "", ""} }, 1392 #endif 1393 }; 1394 1395 static int tcg_target_callee_save_regs[] = { 1396 #ifndef VBOX 1397 /* TCG_REG_EBP, */ /* currently used for the global env, so no 1398 need to save */ 2043 # ifndef VBOX 2044 /* TCG_REG_EBP, */ /* Currently used for the global env. 
*/ 1399 2045 TCG_REG_EBX, 1400 2046 TCG_REG_ESI, 1401 2047 TCG_REG_EDI, 1402 # else2048 # else 1403 2049 TCG_REG_EBP, 1404 2050 TCG_REG_EBX, 1405 /* TCG_REG_ESI, */ /* currently used for the global env, so no 1406 need to save */ 2051 /* TCG_REG_ESI, */ /* Currently used for the global env. */ 1407 2052 TCG_REG_EDI, 2053 # endif 1408 2054 #endif 1409 2055 }; 1410 2056 1411 static inline void tcg_out_push(TCGContext *s, int reg)1412 {1413 tcg_out_opc(s, 0x50 + reg);1414 }1415 1416 static inline void tcg_out_pop(TCGContext *s, int reg)1417 {1418 tcg_out_opc(s, 0x58 + reg);1419 }1420 1421 2057 /* Generate global QEMU prologue and epilogue code */ 1422 void tcg_target_qemu_prologue(TCGContext *s)2058 static void tcg_target_qemu_prologue(TCGContext *s) 1423 2059 { 1424 2060 int i, frame_size, push_size, stack_addend; 1425 2061 1426 2062 /* TB prologue */ 1427 /* save all callee saved registers */ 1428 for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 2063 2064 /* Save all callee saved registers. */ 2065 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1429 2066 tcg_out_push(s, tcg_target_callee_save_regs[i]); 1430 2067 } 1431 /* reserve some stack space */ 1432 push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4; 2068 # if defined(VBOX_STRICT) && defined(RT_ARCH_X86) 2069 tcg_out8(s, 0x31); /* xor ebp, ebp */ 2070 tcg_out8(s, 0xed); 2071 # endif 2072 2073 /* Reserve some stack space. */ 2074 push_size = 1 + ARRAY_SIZE(tcg_target_callee_save_regs); 2075 push_size *= TCG_TARGET_REG_BITS / 8; 2076 1433 2077 frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE; 1434 2078 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) & … … 1436 2080 stack_addend = frame_size - push_size; 1437 2081 tcg_out_addi(s, TCG_REG_ESP, -stack_addend); 2082 2083 /* jmp *tb. 
*/ 2084 tcg_out_modrm(s, OPC_GRP5, EXT5_JMPN_Ev, tcg_target_call_iarg_regs[0]); 1438 2085 # ifdef VBOX 1439 2086 tcg_gen_stack_alignment_check(s); … … 1444 2091 /* TB epilogue */ 1445 2092 tb_ret_addr = s->code_ptr; 2093 1446 2094 tcg_out_addi(s, TCG_REG_ESP, stack_addend); 1447 for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) { 2095 2096 for (i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) { 1448 2097 tcg_out_pop(s, tcg_target_callee_save_regs[i]); 1449 2098 } 1450 tcg_out8(s, 0xc3); /* ret */ 1451 } 1452 1453 void tcg_target_init(TCGContext *s) 1454 { 2099 tcg_out_opc(s, OPC_RET, 0, 0, 0); 2100 } 2101 2102 static void tcg_target_init(TCGContext *s) 2103 { 2104 #if !defined(CONFIG_USER_ONLY) 1455 2105 /* fail safe */ 1456 2106 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) 1457 2107 tcg_abort(); 1458 1459 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff); 1460 tcg_regset_set32(tcg_target_call_clobber_regs, 0, 1461 (1 << TCG_REG_EAX) | 1462 (1 << TCG_REG_EDX) | 1463 (1 << TCG_REG_ECX)); 2108 #endif 2109 2110 if (TCG_TARGET_REG_BITS == 64) { 2111 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff); 2112 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff); 2113 } else { 2114 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff); 2115 } 2116 2117 tcg_regset_clear(tcg_target_call_clobber_regs); 2118 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EAX); 2119 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_EDX); 2120 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_ECX); 2121 if (TCG_TARGET_REG_BITS == 64) { 2122 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RDI); 2123 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RSI); 2124 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8); 2125 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9); 2126 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10); 2127 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11); 2128 } 1464 2129 1465 2130 tcg_regset_clear(s->reserved_regs); -
trunk/src/recompiler/tcg/i386/tcg-target.h
r37675 r37689 24 24 #define TCG_TARGET_I386 1 25 25 26 #define TCG_TARGET_REG_BITS 32 26 #if defined(__x86_64__) 27 # define TCG_TARGET_REG_BITS 64 28 #else 29 # define TCG_TARGET_REG_BITS 32 30 #endif 27 31 //#define TCG_TARGET_WORDS_BIGENDIAN 28 32 29 #define TCG_TARGET_NB_REGS 8 33 #if TCG_TARGET_REG_BITS == 64 34 # define TCG_TARGET_NB_REGS 16 35 #else 36 # define TCG_TARGET_NB_REGS 8 37 #endif 30 38 31 39 enum { … … 38 46 TCG_REG_ESI, 39 47 TCG_REG_EDI, 48 49 /* 64-bit registers; always define the symbols to avoid 50 too much if-deffing. */ 51 TCG_REG_R8, 52 TCG_REG_R9, 53 TCG_REG_R10, 54 TCG_REG_R11, 55 TCG_REG_R12, 56 TCG_REG_R13, 57 TCG_REG_R14, 58 TCG_REG_R15, 59 TCG_REG_RAX = TCG_REG_EAX, 60 TCG_REG_RCX = TCG_REG_ECX, 61 TCG_REG_RDX = TCG_REG_EDX, 62 TCG_REG_RBX = TCG_REG_EBX, 63 TCG_REG_RSP = TCG_REG_ESP, 64 TCG_REG_RBP = TCG_REG_EBP, 65 TCG_REG_RSI = TCG_REG_ESI, 66 TCG_REG_RDI = TCG_REG_EDI, 40 67 }; 68 69 #define TCG_CT_CONST_S32 0x100 70 #define TCG_CT_CONST_U32 0x200 41 71 42 72 /* used for function call generation */ … … 46 76 47 77 /* optional instructions */ 78 #define TCG_TARGET_HAS_div2_i32 79 #define TCG_TARGET_HAS_rot_i32 80 #define TCG_TARGET_HAS_ext8s_i32 81 #define TCG_TARGET_HAS_ext16s_i32 82 #define TCG_TARGET_HAS_ext8u_i32 83 #define TCG_TARGET_HAS_ext16u_i32 48 84 #define TCG_TARGET_HAS_bswap16_i32 49 85 #define TCG_TARGET_HAS_bswap32_i32 50 86 #define TCG_TARGET_HAS_neg_i32 51 87 #define TCG_TARGET_HAS_not_i32 52 #define TCG_TARGET_HAS_ext8s_i32 53 #define TCG_TARGET_HAS_ext16s_i32 54 #define TCG_TARGET_HAS_rot_i32 55 #define TCG_TARGET_HAS_ext8u_i32 56 #define TCG_TARGET_HAS_ext16u_i32 88 // #define TCG_TARGET_HAS_andc_i32 89 // #define TCG_TARGET_HAS_orc_i32 90 // #define TCG_TARGET_HAS_eqv_i32 91 // #define TCG_TARGET_HAS_nand_i32 92 // #define TCG_TARGET_HAS_nor_i32 93 94 #if TCG_TARGET_REG_BITS == 64 95 #define TCG_TARGET_HAS_div2_i64 96 #define TCG_TARGET_HAS_rot_i64 97 #define TCG_TARGET_HAS_ext8s_i64 98 #define TCG_TARGET_HAS_ext16s_i64 99 #define TCG_TARGET_HAS_ext32s_i64 100 #define TCG_TARGET_HAS_ext8u_i64 101 #define TCG_TARGET_HAS_ext16u_i64 102 #define TCG_TARGET_HAS_ext32u_i64 103 #define TCG_TARGET_HAS_bswap16_i64 104 #define TCG_TARGET_HAS_bswap32_i64 105 #define TCG_TARGET_HAS_bswap64_i64 106 #define TCG_TARGET_HAS_neg_i64 107 #define TCG_TARGET_HAS_not_i64 108 // #define TCG_TARGET_HAS_andc_i64 109 // #define TCG_TARGET_HAS_orc_i64 110 // #define TCG_TARGET_HAS_eqv_i64 111 // #define TCG_TARGET_HAS_nand_i64 112 // #define TCG_TARGET_HAS_nor_i64 113 #endif 57 114 58 115 #define TCG_TARGET_HAS_GUEST_BASE 59 116 60 117 /* Note: must be synced with dyngen-exec.h */ 61 #ifndef VBOX 62 #define TCG_AREG0 TCG_REG_EBP 63 #define TCG_AREG1 TCG_REG_EBX 64 #define TCG_AREG2 TCG_REG_ESI 118 #if TCG_TARGET_REG_BITS == 64 119 # define TCG_AREG0 TCG_REG_R14 65 120 #else 66 # define TCG_AREG0 TCG_REG_ESI 67 # define TCG_AREG1 TCG_REG_EDI 121 # ifndef VBOX /* we're using ESI instead of EBP, probably due to frame pointer opt issues */ 122 # define TCG_AREG0 TCG_REG_EBP 123 # else /* VBOX */ 124 # define TCG_AREG0 TCG_REG_ESI 125 # endif /* VBOX */ 68 126 #endif 69 127 -
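The header now picks the register-file width at compile time and aliases the 64-bit R* register names onto the existing E* ones, so code shared between the two backends can always use one set of names. A minimal sketch of that selection pattern, with invented MY_* macros standing in for the real ones:

    #include <stdio.h>

    #if defined(__x86_64__)
    # define MY_TCG_REG_BITS 64
    # define MY_TCG_NB_REGS  16
    #else
    # define MY_TCG_REG_BITS 32
    # define MY_TCG_NB_REGS  8
    #endif

    int main(void)
    {
        printf("TCG host register file: %d registers of %d bits\n",
               MY_TCG_NB_REGS, MY_TCG_REG_BITS);
        return 0;
    }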
trunk/src/recompiler/tcg/tcg-op.h
r37675 r37689 26 26 int gen_new_label(void); 27 27 28 static inline void tcg_gen_op1_i32( intopc, TCGv_i32 arg1)28 static inline void tcg_gen_op1_i32(TCGOpcode opc, TCGv_i32 arg1) 29 29 { 30 30 *gen_opc_ptr++ = opc; … … 32 32 } 33 33 34 static inline void tcg_gen_op1_i64( intopc, TCGv_i64 arg1)34 static inline void tcg_gen_op1_i64(TCGOpcode opc, TCGv_i64 arg1) 35 35 { 36 36 *gen_opc_ptr++ = opc; … … 38 38 } 39 39 40 static inline void tcg_gen_op1i( intopc, TCGArg arg1)40 static inline void tcg_gen_op1i(TCGOpcode opc, TCGArg arg1) 41 41 { 42 42 *gen_opc_ptr++ = opc; … … 44 44 } 45 45 46 static inline void tcg_gen_op2_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2)46 static inline void tcg_gen_op2_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2) 47 47 { 48 48 *gen_opc_ptr++ = opc; … … 51 51 } 52 52 53 static inline void tcg_gen_op2_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2)53 static inline void tcg_gen_op2_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2) 54 54 { 55 55 *gen_opc_ptr++ = opc; … … 58 58 } 59 59 60 static inline void tcg_gen_op2i_i32( intopc, TCGv_i32 arg1, TCGArg arg2)60 static inline void tcg_gen_op2i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGArg arg2) 61 61 { 62 62 *gen_opc_ptr++ = opc; … … 65 65 } 66 66 67 static inline void tcg_gen_op2i_i64( intopc, TCGv_i64 arg1, TCGArg arg2)67 static inline void tcg_gen_op2i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGArg arg2) 68 68 { 69 69 *gen_opc_ptr++ = opc; … … 72 72 } 73 73 74 static inline void tcg_gen_op2ii( intopc, TCGArg arg1, TCGArg arg2)74 static inline void tcg_gen_op2ii(TCGOpcode opc, TCGArg arg1, TCGArg arg2) 75 75 { 76 76 *gen_opc_ptr++ = opc; … … 79 79 } 80 80 81 static inline void tcg_gen_op3_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2,81 static inline void tcg_gen_op3_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 82 82 TCGv_i32 arg3) 83 83 { … … 88 88 } 89 89 90 static inline void tcg_gen_op3_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2,90 static inline void tcg_gen_op3_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 91 91 TCGv_i64 arg3) 92 92 { … … 97 97 } 98 98 99 static inline void tcg_gen_op3i_i32( int opc, TCGv_i32 arg1, TCGv_i32 arg2,100 TCG Arg arg3)99 static inline void tcg_gen_op3i_i32(TCGOpcode opc, TCGv_i32 arg1, 100 TCGv_i32 arg2, TCGArg arg3) 101 101 { 102 102 *gen_opc_ptr++ = opc; … … 106 106 } 107 107 108 static inline void tcg_gen_op3i_i64( int opc, TCGv_i64 arg1, TCGv_i64 arg2,109 TCG Arg arg3)108 static inline void tcg_gen_op3i_i64(TCGOpcode opc, TCGv_i64 arg1, 109 TCGv_i64 arg2, TCGArg arg3) 110 110 { 111 111 *gen_opc_ptr++ = opc; … … 115 115 } 116 116 117 static inline void tcg_gen_ldst_op_i32( int opc, TCGv_i32 val, TCGv_ptr base,118 TCG Arg offset)117 static inline void tcg_gen_ldst_op_i32(TCGOpcode opc, TCGv_i32 val, 118 TCGv_ptr base, TCGArg offset) 119 119 { 120 120 *gen_opc_ptr++ = opc; … … 124 124 } 125 125 126 static inline void tcg_gen_ldst_op_i64( int opc, TCGv_i64 val, TCGv_ptr base,127 TCG Arg offset)126 static inline void tcg_gen_ldst_op_i64(TCGOpcode opc, TCGv_i64 val, 127 TCGv_ptr base, TCGArg offset) 128 128 { 129 129 *gen_opc_ptr++ = opc; … … 133 133 } 134 134 135 static inline void tcg_gen_qemu_ldst_op_i64_i32( int opc, TCGv_i64 val, TCGv_i32 addr,136 TCG Arg mem_index)135 static inline void tcg_gen_qemu_ldst_op_i64_i32(TCGOpcode opc, TCGv_i64 val, 136 TCGv_i32 addr, TCGArg mem_index) 137 137 { 138 138 *gen_opc_ptr++ = opc; … … 142 142 } 143 143 144 static inline void tcg_gen_qemu_ldst_op_i64_i64( int opc, TCGv_i64 val, TCGv_i64 addr,145 TCG Arg mem_index)144 static inline void 
tcg_gen_qemu_ldst_op_i64_i64(TCGOpcode opc, TCGv_i64 val, 145 TCGv_i64 addr, TCGArg mem_index) 146 146 { 147 147 *gen_opc_ptr++ = opc; … … 151 151 } 152 152 153 static inline void tcg_gen_op4_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2,153 static inline void tcg_gen_op4_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 154 154 TCGv_i32 arg3, TCGv_i32 arg4) 155 155 { … … 161 161 } 162 162 163 static inline void tcg_gen_op4_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2,163 static inline void tcg_gen_op4_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 164 164 TCGv_i64 arg3, TCGv_i64 arg4) 165 165 { … … 171 171 } 172 172 173 static inline void tcg_gen_op4i_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2,173 static inline void tcg_gen_op4i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 174 174 TCGv_i32 arg3, TCGArg arg4) 175 175 { … … 181 181 } 182 182 183 static inline void tcg_gen_op4i_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2,183 static inline void tcg_gen_op4i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 184 184 TCGv_i64 arg3, TCGArg arg4) 185 185 { … … 191 191 } 192 192 193 static inline void tcg_gen_op4ii_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2,193 static inline void tcg_gen_op4ii_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 194 194 TCGArg arg3, TCGArg arg4) 195 195 { … … 201 201 } 202 202 203 static inline void tcg_gen_op4ii_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2,203 static inline void tcg_gen_op4ii_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 204 204 TCGArg arg3, TCGArg arg4) 205 205 { … … 211 211 } 212 212 213 static inline void tcg_gen_op5_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2,213 static inline void tcg_gen_op5_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 214 214 TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5) 215 215 { … … 222 222 } 223 223 224 static inline void tcg_gen_op5_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2,224 static inline void tcg_gen_op5_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 225 225 TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5) 226 226 { … … 233 233 } 234 234 235 static inline void tcg_gen_op5i_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2,235 static inline void tcg_gen_op5i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 236 236 TCGv_i32 arg3, TCGv_i32 arg4, TCGArg arg5) 237 237 { … … 244 244 } 245 245 246 static inline void tcg_gen_op5i_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2,246 static inline void tcg_gen_op5i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 247 247 TCGv_i64 arg3, TCGv_i64 arg4, TCGArg arg5) 248 248 { … … 255 255 } 256 256 257 static inline void tcg_gen_op6_i32( intopc, TCGv_i32 arg1, TCGv_i32 arg2,257 static inline void tcg_gen_op6_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 258 258 TCGv_i32 arg3, TCGv_i32 arg4, TCGv_i32 arg5, 259 259 TCGv_i32 arg6) … … 268 268 } 269 269 270 static inline void tcg_gen_op6_i64( intopc, TCGv_i64 arg1, TCGv_i64 arg2,270 static inline void tcg_gen_op6_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 271 271 TCGv_i64 arg3, TCGv_i64 arg4, TCGv_i64 arg5, 272 272 TCGv_i64 arg6) … … 281 281 } 282 282 283 static inline void tcg_gen_op6ii_i32(int opc, TCGv_i32 arg1, TCGv_i32 arg2, 284 TCGv_i32 arg3, TCGv_i32 arg4, TCGArg arg5, 285 TCGArg arg6) 283 static inline void tcg_gen_op6i_i32(TCGOpcode opc, TCGv_i32 arg1, TCGv_i32 arg2, 284 TCGv_i32 arg3, TCGv_i32 arg4, 285 TCGv_i32 arg5, TCGArg arg6) 286 { 287 *gen_opc_ptr++ = opc; 288 *gen_opparam_ptr++ = GET_TCGV_I32(arg1); 289 *gen_opparam_ptr++ = GET_TCGV_I32(arg2); 290 *gen_opparam_ptr++ = GET_TCGV_I32(arg3); 291 *gen_opparam_ptr++ = GET_TCGV_I32(arg4); 
292 *gen_opparam_ptr++ = GET_TCGV_I32(arg5); 293 *gen_opparam_ptr++ = arg6; 294 } 295 296 static inline void tcg_gen_op6i_i64(TCGOpcode opc, TCGv_i64 arg1, TCGv_i64 arg2, 297 TCGv_i64 arg3, TCGv_i64 arg4, 298 TCGv_i64 arg5, TCGArg arg6) 299 { 300 *gen_opc_ptr++ = opc; 301 *gen_opparam_ptr++ = GET_TCGV_I64(arg1); 302 *gen_opparam_ptr++ = GET_TCGV_I64(arg2); 303 *gen_opparam_ptr++ = GET_TCGV_I64(arg3); 304 *gen_opparam_ptr++ = GET_TCGV_I64(arg4); 305 *gen_opparam_ptr++ = GET_TCGV_I64(arg5); 306 *gen_opparam_ptr++ = arg6; 307 } 308 309 static inline void tcg_gen_op6ii_i32(TCGOpcode opc, TCGv_i32 arg1, 310 TCGv_i32 arg2, TCGv_i32 arg3, 311 TCGv_i32 arg4, TCGArg arg5, TCGArg arg6) 286 312 { 287 313 *gen_opc_ptr++ = opc; … … 294 320 } 295 321 296 static inline void tcg_gen_op6ii_i64( int opc, TCGv_i64 arg1, TCGv_i64 arg2,297 TCGv_i64 arg 3, TCGv_i64 arg4, TCGArg arg5,298 TCG Arg arg6)322 static inline void tcg_gen_op6ii_i64(TCGOpcode opc, TCGv_i64 arg1, 323 TCGv_i64 arg2, TCGv_i64 arg3, 324 TCGv_i64 arg4, TCGArg arg5, TCGArg arg6) 299 325 { 300 326 *gen_opc_ptr++ = opc; … … 328 354 } 329 355 356 /* A version of dh_sizemask from def-helper.h that doesn't rely on 357 preprocessor magic. */ 358 static inline int tcg_gen_sizemask(int n, int is_64bit, int is_signed) 359 { 360 return (is_64bit << n*2) | (is_signed << (n*2 + 1)); 361 } 362 330 363 /* helper calls */ 331 364 static inline void tcg_gen_helperN(void *func, int flags, int sizemask, … … 339 372 } 340 373 341 /* FIXME: Should this be pure? */ 342 static inline void tcg_gen_helper64(void *func, TCGv_i64 ret, 374 /* Note: Both tcg_gen_helper32() and tcg_gen_helper64() are currently 375 reserved for helpers in tcg-runtime.c. These helpers are all const 376 and pure, hence the call to tcg_gen_callN() with TCG_CALL_CONST | 377 TCG_CALL_PURE. This may need to be adjusted if these functions 378 start to be used with other helpers. 
*/ 379 static inline void tcg_gen_helper32(void *func, int sizemask, TCGv_i32 ret, 380 TCGv_i32 a, TCGv_i32 b) 381 { 382 TCGv_ptr fn; 383 TCGArg args[2]; 384 fn = tcg_const_ptr((tcg_target_long)func); 385 args[0] = GET_TCGV_I32(a); 386 args[1] = GET_TCGV_I32(b); 387 tcg_gen_callN(&tcg_ctx, fn, TCG_CALL_CONST | TCG_CALL_PURE, sizemask, 388 GET_TCGV_I32(ret), 2, args); 389 tcg_temp_free_ptr(fn); 390 } 391 392 static inline void tcg_gen_helper64(void *func, int sizemask, TCGv_i64 ret, 343 393 TCGv_i64 a, TCGv_i64 b) 344 394 { … … 348 398 args[0] = GET_TCGV_I64(a); 349 399 args[1] = GET_TCGV_I64(b); 350 tcg_gen_callN(&tcg_ctx, fn, 0, 7, GET_TCGV_I64(ret), 2, args); 400 tcg_gen_callN(&tcg_ctx, fn, TCG_CALL_CONST | TCG_CALL_PURE, sizemask, 401 GET_TCGV_I64(ret), 2, args); 351 402 tcg_temp_free_ptr(fn); 352 403 } … … 550 601 } 551 602 552 static inline void tcg_gen_brcond_i32( int cond, TCGv_i32 arg1, TCGv_i32 arg2,553 int label_index)603 static inline void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, 604 TCGv_i32 arg2, int label_index) 554 605 { 555 606 tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_index); 556 607 } 557 608 558 static inline void tcg_gen_brcondi_i32( int cond, TCGv_i32 arg1, int32_t arg2,559 int label_index)609 static inline void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, 610 int32_t arg2, int label_index) 560 611 { 561 612 TCGv_i32 t0 = tcg_const_i32(arg2); … … 564 615 } 565 616 617 static inline void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret, 618 TCGv_i32 arg1, TCGv_i32 arg2) 619 { 620 tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond); 621 } 622 623 static inline void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret, 624 TCGv_i32 arg1, int32_t arg2) 625 { 626 TCGv_i32 t0 = tcg_const_i32(arg2); 627 tcg_gen_setcond_i32(cond, ret, arg1, t0); 628 tcg_temp_free_i32(t0); 629 } 630 566 631 static inline void tcg_gen_mul_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 567 632 { … … 596 661 tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2); 597 662 } 598 #el se663 #elif defined(TCG_TARGET_HAS_div2_i32) 599 664 static inline void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 600 665 { … … 632 697 tcg_temp_free_i32(t0); 633 698 } 699 #else 700 static inline void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 701 { 702 int sizemask = 0; 703 /* Return value and both arguments are 32-bit and signed. */ 704 sizemask |= tcg_gen_sizemask(0, 0, 1); 705 sizemask |= tcg_gen_sizemask(1, 0, 1); 706 sizemask |= tcg_gen_sizemask(2, 0, 1); 707 708 tcg_gen_helper32(tcg_helper_div_i32, sizemask, ret, arg1, arg2); 709 } 710 711 static inline void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 712 { 713 int sizemask = 0; 714 /* Return value and both arguments are 32-bit and signed. */ 715 sizemask |= tcg_gen_sizemask(0, 0, 1); 716 sizemask |= tcg_gen_sizemask(1, 0, 1); 717 sizemask |= tcg_gen_sizemask(2, 0, 1); 718 719 tcg_gen_helper32(tcg_helper_rem_i32, sizemask, ret, arg1, arg2); 720 } 721 722 static inline void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 723 { 724 int sizemask = 0; 725 /* Return value and both arguments are 32-bit and unsigned. 
*/ 726 sizemask |= tcg_gen_sizemask(0, 0, 0); 727 sizemask |= tcg_gen_sizemask(1, 0, 0); 728 sizemask |= tcg_gen_sizemask(2, 0, 0); 729 730 tcg_gen_helper32(tcg_helper_divu_i32, ret, arg1, arg2, 0); 731 } 732 733 static inline void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 734 { 735 int sizemask = 0; 736 /* Return value and both arguments are 32-bit and unsigned. */ 737 sizemask |= tcg_gen_sizemask(0, 0, 0); 738 sizemask |= tcg_gen_sizemask(1, 0, 0); 739 sizemask |= tcg_gen_sizemask(2, 0, 0); 740 741 tcg_gen_helper32(tcg_helper_remu_i32, ret, arg1, arg2, 0); 742 } 634 743 #endif 635 744 … … 790 899 static inline void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 791 900 { 792 tcg_gen_helper64(tcg_helper_shl_i64, ret, arg1, arg2); 901 int sizemask = 0; 902 /* Return value and both arguments are 64-bit and signed. */ 903 sizemask |= tcg_gen_sizemask(0, 1, 1); 904 sizemask |= tcg_gen_sizemask(1, 1, 1); 905 sizemask |= tcg_gen_sizemask(2, 1, 1); 906 907 tcg_gen_helper64(tcg_helper_shl_i64, sizemask, ret, arg1, arg2); 793 908 } 794 909 … … 800 915 static inline void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 801 916 { 802 tcg_gen_helper64(tcg_helper_shr_i64, ret, arg1, arg2); 917 int sizemask = 0; 918 /* Return value and both arguments are 64-bit and signed. */ 919 sizemask |= tcg_gen_sizemask(0, 1, 1); 920 sizemask |= tcg_gen_sizemask(1, 1, 1); 921 sizemask |= tcg_gen_sizemask(2, 1, 1); 922 923 tcg_gen_helper64(tcg_helper_shr_i64, sizemask, ret, arg1, arg2); 803 924 } 804 925 … … 810 931 static inline void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 811 932 { 812 tcg_gen_helper64(tcg_helper_sar_i64, ret, arg1, arg2); 933 int sizemask = 0; 934 /* Return value and both arguments are 64-bit and signed. */ 935 sizemask |= tcg_gen_sizemask(0, 1, 1); 936 sizemask |= tcg_gen_sizemask(1, 1, 1); 937 sizemask |= tcg_gen_sizemask(2, 1, 1); 938 939 tcg_gen_helper64(tcg_helper_sar_i64, sizemask, ret, arg1, arg2); 813 940 } 814 941 … … 818 945 } 819 946 820 static inline void tcg_gen_brcond_i64( int cond, TCGv_i64 arg1, TCGv_i64 arg2,821 int label_index)947 static inline void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, 948 TCGv_i64 arg2, int label_index) 822 949 { 823 950 tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, 824 951 TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), 825 952 TCGV_HIGH(arg2), cond, label_index); 953 } 954 955 static inline void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret, 956 TCGv_i64 arg1, TCGv_i64 arg2) 957 { 958 tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret), 959 TCGV_LOW(arg1), TCGV_HIGH(arg1), 960 TCGV_LOW(arg2), TCGV_HIGH(arg2), cond); 961 tcg_gen_movi_i32(TCGV_HIGH(ret), 0); 826 962 } 827 963 … … 849 985 static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 850 986 { 851 tcg_gen_helper64(tcg_helper_div_i64, ret, arg1, arg2); 987 int sizemask = 0; 988 /* Return value and both arguments are 64-bit and signed. */ 989 sizemask |= tcg_gen_sizemask(0, 1, 1); 990 sizemask |= tcg_gen_sizemask(1, 1, 1); 991 sizemask |= tcg_gen_sizemask(2, 1, 1); 992 993 tcg_gen_helper64(tcg_helper_div_i64, sizemask, ret, arg1, arg2); 852 994 } 853 995 854 996 static inline void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 855 997 { 856 tcg_gen_helper64(tcg_helper_rem_i64, ret, arg1, arg2); 998 int sizemask = 0; 999 /* Return value and both arguments are 64-bit and signed. 
*/ 1000 sizemask |= tcg_gen_sizemask(0, 1, 1); 1001 sizemask |= tcg_gen_sizemask(1, 1, 1); 1002 sizemask |= tcg_gen_sizemask(2, 1, 1); 1003 1004 tcg_gen_helper64(tcg_helper_rem_i64, sizemask, ret, arg1, arg2); 857 1005 } 858 1006 859 1007 static inline void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 860 1008 { 861 tcg_gen_helper64(tcg_helper_divu_i64, ret, arg1, arg2); 1009 int sizemask = 0; 1010 /* Return value and both arguments are 64-bit and unsigned. */ 1011 sizemask |= tcg_gen_sizemask(0, 1, 0); 1012 sizemask |= tcg_gen_sizemask(1, 1, 0); 1013 sizemask |= tcg_gen_sizemask(2, 1, 0); 1014 1015 tcg_gen_helper64(tcg_helper_divu_i64, sizemask, ret, arg1, arg2); 862 1016 } 863 1017 864 1018 static inline void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 865 1019 { 866 tcg_gen_helper64(tcg_helper_remu_i64, ret, arg1, arg2); 1020 int sizemask = 0; 1021 /* Return value and both arguments are 64-bit and unsigned. */ 1022 sizemask |= tcg_gen_sizemask(0, 1, 0); 1023 sizemask |= tcg_gen_sizemask(1, 1, 0); 1024 sizemask |= tcg_gen_sizemask(2, 1, 0); 1025 1026 tcg_gen_helper64(tcg_helper_remu_i64, sizemask, ret, arg1, arg2); 867 1027 } 868 1028 … … 1050 1210 } 1051 1211 1052 static inline void tcg_gen_brcond_i64( int cond, TCGv_i64 arg1, TCGv_i64 arg2,1053 int label_index)1212 static inline void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, 1213 TCGv_i64 arg2, int label_index) 1054 1214 { 1055 1215 tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label_index); 1216 } 1217 1218 static inline void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret, 1219 TCGv_i64 arg1, TCGv_i64 arg2) 1220 { 1221 tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond); 1056 1222 } 1057 1223 … … 1081 1247 tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2); 1082 1248 } 1083 #el se1249 #elif defined(TCG_TARGET_HAS_div2_i64) 1084 1250 static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1085 1251 { … … 1116 1282 tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); 1117 1283 tcg_temp_free_i64(t0); 1284 } 1285 #else 1286 static inline void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1287 { 1288 int sizemask = 0; 1289 /* Return value and both arguments are 64-bit and signed. */ 1290 sizemask |= tcg_gen_sizemask(0, 1, 1); 1291 sizemask |= tcg_gen_sizemask(1, 1, 1); 1292 sizemask |= tcg_gen_sizemask(2, 1, 1); 1293 1294 tcg_gen_helper64(tcg_helper_div_i64, sizemask, ret, arg1, arg2); 1295 } 1296 1297 static inline void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1298 { 1299 int sizemask = 0; 1300 /* Return value and both arguments are 64-bit and signed. */ 1301 sizemask |= tcg_gen_sizemask(0, 1, 1); 1302 sizemask |= tcg_gen_sizemask(1, 1, 1); 1303 sizemask |= tcg_gen_sizemask(2, 1, 1); 1304 1305 tcg_gen_helper64(tcg_helper_rem_i64, sizemask, ret, arg1, arg2); 1306 } 1307 1308 static inline void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1309 { 1310 int sizemask = 0; 1311 /* Return value and both arguments are 64-bit and unsigned. */ 1312 sizemask |= tcg_gen_sizemask(0, 1, 0); 1313 sizemask |= tcg_gen_sizemask(1, 1, 0); 1314 sizemask |= tcg_gen_sizemask(2, 1, 0); 1315 1316 tcg_gen_helper64(tcg_helper_divu_i64, sizemask, ret, arg1, arg2); 1317 } 1318 1319 static inline void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1320 { 1321 int sizemask = 0; 1322 /* Return value and both arguments are 64-bit and unsigned. 
*/ 1323 sizemask |= tcg_gen_sizemask(0, 1, 0); 1324 sizemask |= tcg_gen_sizemask(1, 1, 0); 1325 sizemask |= tcg_gen_sizemask(2, 1, 0); 1326 1327 tcg_gen_helper64(tcg_helper_remu_i64, sizemask, ret, arg1, arg2); 1118 1328 } 1119 1329 #endif … … 1151 1361 } 1152 1362 } 1153 static inline void tcg_gen_brcondi_i64( int cond, TCGv_i64 arg1, int64_t arg2,1154 int label_index)1363 static inline void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, 1364 int64_t arg2, int label_index) 1155 1365 { 1156 1366 TCGv_i64 t0 = tcg_const_i64(arg2); 1157 1367 tcg_gen_brcond_i64(cond, arg1, t0, label_index); 1368 tcg_temp_free_i64(t0); 1369 } 1370 1371 static inline void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret, 1372 TCGv_i64 arg1, int64_t arg2) 1373 { 1374 TCGv_i64 t0 = tcg_const_i64(arg2); 1375 tcg_gen_setcond_i64(cond, ret, arg1, t0); 1158 1376 tcg_temp_free_i64(t0); 1159 1377 } … … 1533 1751 #ifdef TCG_TARGET_HAS_not_i64 1534 1752 tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg); 1753 #elif defined(TCG_TARGET_HAS_not_i32) && TCG_TARGET_REG_BITS == 32 1754 tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg)); 1755 tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); 1535 1756 #else 1536 1757 tcg_gen_xori_i64(ret, arg, -1); … … 1588 1809 static inline void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1589 1810 { 1811 #ifdef TCG_TARGET_HAS_andc_i32 1812 tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2); 1813 #else 1590 1814 TCGv_i32 t0; 1591 1815 t0 = tcg_temp_new_i32(); … … 1593 1817 tcg_gen_and_i32(ret, arg1, t0); 1594 1818 tcg_temp_free_i32(t0); 1819 #endif 1595 1820 } 1596 1821 1597 1822 static inline void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1598 1823 { 1824 #ifdef TCG_TARGET_HAS_andc_i64 1825 tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2); 1826 #elif defined(TCG_TARGET_HAS_andc_i32) && TCG_TARGET_REG_BITS == 32 1827 tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); 1828 tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); 1829 #else 1599 1830 TCGv_i64 t0; 1600 1831 t0 = tcg_temp_new_i64(); … … 1602 1833 tcg_gen_and_i64(ret, arg1, t0); 1603 1834 tcg_temp_free_i64(t0); 1835 #endif 1604 1836 } 1605 1837 1606 1838 static inline void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1607 1839 { 1840 #ifdef TCG_TARGET_HAS_eqv_i32 1841 tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2); 1842 #else 1608 1843 tcg_gen_xor_i32(ret, arg1, arg2); 1609 1844 tcg_gen_not_i32(ret, ret); 1845 #endif 1610 1846 } 1611 1847 1612 1848 static inline void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1613 1849 { 1850 #ifdef TCG_TARGET_HAS_eqv_i64 1851 tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2); 1852 #elif defined(TCG_TARGET_HAS_eqv_i32) && TCG_TARGET_REG_BITS == 32 1853 tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); 1854 tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); 1855 #else 1614 1856 tcg_gen_xor_i64(ret, arg1, arg2); 1615 1857 tcg_gen_not_i64(ret, ret); 1858 #endif 1616 1859 } 1617 1860 1618 1861 static inline void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1619 1862 { 1863 #ifdef TCG_TARGET_HAS_nand_i32 1864 tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2); 1865 #else 1620 1866 tcg_gen_and_i32(ret, arg1, arg2); 1621 1867 tcg_gen_not_i32(ret, ret); 1868 #endif 1622 1869 } 1623 1870 1624 1871 static inline void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1625 1872 { 1873 #ifdef TCG_TARGET_HAS_nand_i64 1874 tcg_gen_op3_i64(INDEX_op_nand_i64, ret, 
arg1, arg2); 1875 #elif defined(TCG_TARGET_HAS_nand_i32) && TCG_TARGET_REG_BITS == 32 1876 tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); 1877 tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); 1878 #else 1626 1879 tcg_gen_and_i64(ret, arg1, arg2); 1627 1880 tcg_gen_not_i64(ret, ret); 1881 #endif 1628 1882 } 1629 1883 1630 1884 static inline void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1631 1885 { 1886 #ifdef TCG_TARGET_HAS_nor_i32 1887 tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2); 1888 #else 1632 1889 tcg_gen_or_i32(ret, arg1, arg2); 1633 1890 tcg_gen_not_i32(ret, ret); 1891 #endif 1634 1892 } 1635 1893 1636 1894 static inline void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1637 1895 { 1896 #ifdef TCG_TARGET_HAS_nor_i64 1897 tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2); 1898 #elif defined(TCG_TARGET_HAS_nor_i32) && TCG_TARGET_REG_BITS == 32 1899 tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); 1900 tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); 1901 #else 1638 1902 tcg_gen_or_i64(ret, arg1, arg2); 1639 1903 tcg_gen_not_i64(ret, ret); 1904 #endif 1640 1905 } 1641 1906 1642 1907 static inline void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) 1643 1908 { 1909 #ifdef TCG_TARGET_HAS_orc_i32 1910 tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2); 1911 #else 1644 1912 TCGv_i32 t0; 1645 1913 t0 = tcg_temp_new_i32(); … … 1647 1915 tcg_gen_or_i32(ret, arg1, t0); 1648 1916 tcg_temp_free_i32(t0); 1917 #endif 1649 1918 } 1650 1919 1651 1920 static inline void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) 1652 1921 { 1922 #ifdef TCG_TARGET_HAS_orc_i64 1923 tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2); 1924 #elif defined(TCG_TARGET_HAS_orc_i32) && TCG_TARGET_REG_BITS == 32 1925 tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); 1926 tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); 1927 #else 1653 1928 TCGv_i64 t0; 1654 1929 t0 = tcg_temp_new_i64(); … … 1656 1931 tcg_gen_or_i64(ret, arg1, t0); 1657 1932 tcg_temp_free_i64(t0); 1933 #endif 1658 1934 } 1659 1935 … … 1897 2173 { 1898 2174 #if TARGET_LONG_BITS == 32 1899 tcg_gen_op3i_i32(INDEX_op_qemu_ld32 u, ret, addr, mem_index);1900 #else 1901 tcg_gen_op4i_i32(INDEX_op_qemu_ld32 u, TCGV_LOW(ret), TCGV_LOW(addr),2175 tcg_gen_op3i_i32(INDEX_op_qemu_ld32, ret, addr, mem_index); 2176 #else 2177 tcg_gen_op4i_i32(INDEX_op_qemu_ld32, TCGV_LOW(ret), TCGV_LOW(addr), 1902 2178 TCGV_HIGH(addr), mem_index); 1903 2179 tcg_gen_movi_i32(TCGV_HIGH(ret), 0); … … 1908 2184 { 1909 2185 #if TARGET_LONG_BITS == 32 1910 tcg_gen_op3i_i32(INDEX_op_qemu_ld32 u, ret, addr, mem_index);1911 #else 1912 tcg_gen_op4i_i32(INDEX_op_qemu_ld32 u, TCGV_LOW(ret), TCGV_LOW(addr),2186 tcg_gen_op3i_i32(INDEX_op_qemu_ld32, ret, addr, mem_index); 2187 #else 2188 tcg_gen_op4i_i32(INDEX_op_qemu_ld32, TCGV_LOW(ret), TCGV_LOW(addr), 1913 2189 TCGV_HIGH(addr), mem_index); 1914 2190 tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); … … 1994 2270 static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index) 1995 2271 { 2272 #if TARGET_LONG_BITS == 32 2273 tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32, ret, addr, mem_index); 2274 #else 1996 2275 tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32u, ret, addr, mem_index); 2276 #endif 1997 2277 } 1998 2278 1999 2279 static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index) 2000 2280 { 2281 #if TARGET_LONG_BITS == 32 2282 tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32, 
ret, addr, mem_index); 2283 #else 2001 2284 tcg_gen_qemu_ldst_op(INDEX_op_qemu_ld32s, ret, addr, mem_index); 2285 #endif 2002 2286 } 2003 2287 … … 2033 2317 2034 2318 #if TARGET_LONG_BITS == 64 2035 #define TCG_TYPE_TL TCG_TYPE_I642036 2319 #define tcg_gen_movi_tl tcg_gen_movi_i64 2037 2320 #define tcg_gen_mov_tl tcg_gen_mov_i64 … … 2068 2351 #define tcg_gen_brcond_tl tcg_gen_brcond_i64 2069 2352 #define tcg_gen_brcondi_tl tcg_gen_brcondi_i64 2353 #define tcg_gen_setcond_tl tcg_gen_setcond_i64 2354 #define tcg_gen_setcondi_tl tcg_gen_setcondi_i64 2070 2355 #define tcg_gen_mul_tl tcg_gen_mul_i64 2071 2356 #define tcg_gen_muli_tl tcg_gen_muli_i64 … … 2103 2388 #define tcg_const_local_tl tcg_const_local_i64 2104 2389 #else 2105 #define TCG_TYPE_TL TCG_TYPE_I322106 2390 #define tcg_gen_movi_tl tcg_gen_movi_i32 2107 2391 #define tcg_gen_mov_tl tcg_gen_mov_i32 … … 2138 2422 #define tcg_gen_brcond_tl tcg_gen_brcond_i32 2139 2423 #define tcg_gen_brcondi_tl tcg_gen_brcondi_i32 2424 #define tcg_gen_setcond_tl tcg_gen_setcond_i32 2425 #define tcg_gen_setcondi_tl tcg_gen_setcondi_i32 2140 2426 #define tcg_gen_mul_tl tcg_gen_mul_i32 2141 2427 #define tcg_gen_muli_tl tcg_gen_muli_i32 -
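Much of the tcg-op.h churn threads a sizemask argument through the helper-call wrappers. tcg_gen_sizemask(n, is_64bit, is_signed) describes value slot n (slot 0 is the return value, slots 1 and up are the arguments) by setting bit 2n for 64-bit width and bit 2n+1 for signedness. A small standalone calculation, using a local copy of that formula rather than the header itself, makes the encoding concrete:

    #include <stdio.h>

    /* Same formula as tcg_gen_sizemask() in the hunk above. */
    static int sizemask_bits(int n, int is_64bit, int is_signed)
    {
        return (is_64bit << (n * 2)) | (is_signed << (n * 2 + 1));
    }

    int main(void)
    {
        /* div_i32: 32-bit signed return value and two 32-bit signed args. */
        int m32 = sizemask_bits(0, 0, 1) | sizemask_bits(1, 0, 1)
                | sizemask_bits(2, 0, 1);
        /* divu_i64: 64-bit unsigned return value and two 64-bit unsigned args. */
        int m64 = sizemask_bits(0, 1, 0) | sizemask_bits(1, 1, 0)
                | sizemask_bits(2, 1, 0);

        printf("div_i32 sizemask  = 0x%x\n", m32);  /* prints 0x2a */
        printf("divu_i64 sizemask = 0x%x\n", m64);  /* prints 0x15 */
        return 0;
    }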
trunk/src/recompiler/tcg/tcg-opc.h
r37675 r37689 22 22 * THE SOFTWARE. 23 23 */ 24 #ifndef DEF2 25 #define DEF2(name, oargs, iargs, cargs, flags) DEF(name, oargs + iargs + cargs, 0) 26 #endif 24 25 /* 26 * DEF(name, oargs, iargs, cargs, flags) 27 */ 27 28 28 29 /* predefined ops */ 29 DEF2(end, 0, 0, 0, 0) /* must be kept first */ 30 DEF2(nop, 0, 0, 0, 0) 31 DEF2(nop1, 0, 0, 1, 0) 32 DEF2(nop2, 0, 0, 2, 0) 33 DEF2(nop3, 0, 0, 3, 0) 34 DEF2(nopn, 0, 0, 1, 0) /* variable number of parameters */ 35 36 DEF2(discard, 1, 0, 0, 0) 37 38 DEF2(set_label, 0, 0, 1, 0) 39 DEF2(call, 0, 1, 2, TCG_OPF_SIDE_EFFECTS) /* variable number of parameters */ 40 DEF2(jmp, 0, 1, 0, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 41 DEF2(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 42 43 DEF2(mov_i32, 1, 1, 0, 0) 44 DEF2(movi_i32, 1, 0, 1, 0) 30 DEF(end, 0, 0, 0, 0) /* must be kept first */ 31 DEF(nop, 0, 0, 0, 0) 32 DEF(nop1, 0, 0, 1, 0) 33 DEF(nop2, 0, 0, 2, 0) 34 DEF(nop3, 0, 0, 3, 0) 35 DEF(nopn, 0, 0, 1, 0) /* variable number of parameters */ 36 37 DEF(discard, 1, 0, 0, 0) 38 39 DEF(set_label, 0, 0, 1, 0) 40 DEF(call, 0, 1, 2, TCG_OPF_SIDE_EFFECTS) /* variable number of parameters */ 41 DEF(jmp, 0, 1, 0, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 42 DEF(br, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 43 44 DEF(mov_i32, 1, 1, 0, 0) 45 DEF(movi_i32, 1, 0, 1, 0) 46 DEF(setcond_i32, 1, 2, 1, 0) 45 47 /* load/store */ 46 DEF 2(ld8u_i32, 1, 1, 1, 0)47 DEF 2(ld8s_i32, 1, 1, 1, 0)48 DEF 2(ld16u_i32, 1, 1, 1, 0)49 DEF 2(ld16s_i32, 1, 1, 1, 0)50 DEF 2(ld_i32, 1, 1, 1, 0)51 DEF 2(st8_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)52 DEF 2(st16_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)53 DEF 2(st_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)48 DEF(ld8u_i32, 1, 1, 1, 0) 49 DEF(ld8s_i32, 1, 1, 1, 0) 50 DEF(ld16u_i32, 1, 1, 1, 0) 51 DEF(ld16s_i32, 1, 1, 1, 0) 52 DEF(ld_i32, 1, 1, 1, 0) 53 DEF(st8_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS) 54 DEF(st16_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS) 55 DEF(st_i32, 0, 2, 1, TCG_OPF_SIDE_EFFECTS) 54 56 /* arith */ 55 DEF 2(add_i32, 1, 2, 0, 0)56 DEF 2(sub_i32, 1, 2, 0, 0)57 DEF 2(mul_i32, 1, 2, 0, 0)57 DEF(add_i32, 1, 2, 0, 0) 58 DEF(sub_i32, 1, 2, 0, 0) 59 DEF(mul_i32, 1, 2, 0, 0) 58 60 #ifdef TCG_TARGET_HAS_div_i32 59 DEF2(div_i32, 1, 2, 0, 0) 60 DEF2(divu_i32, 1, 2, 0, 0) 61 DEF2(rem_i32, 1, 2, 0, 0) 62 DEF2(remu_i32, 1, 2, 0, 0) 63 #else 64 DEF2(div2_i32, 2, 3, 0, 0) 65 DEF2(divu2_i32, 2, 3, 0, 0) 66 #endif 67 DEF2(and_i32, 1, 2, 0, 0) 68 DEF2(or_i32, 1, 2, 0, 0) 69 DEF2(xor_i32, 1, 2, 0, 0) 61 DEF(div_i32, 1, 2, 0, 0) 62 DEF(divu_i32, 1, 2, 0, 0) 63 DEF(rem_i32, 1, 2, 0, 0) 64 DEF(remu_i32, 1, 2, 0, 0) 65 #endif 66 #ifdef TCG_TARGET_HAS_div2_i32 67 DEF(div2_i32, 2, 3, 0, 0) 68 DEF(divu2_i32, 2, 3, 0, 0) 69 #endif 70 DEF(and_i32, 1, 2, 0, 0) 71 DEF(or_i32, 1, 2, 0, 0) 72 DEF(xor_i32, 1, 2, 0, 0) 70 73 /* shifts/rotates */ 71 DEF 2(shl_i32, 1, 2, 0, 0)72 DEF 2(shr_i32, 1, 2, 0, 0)73 DEF 2(sar_i32, 1, 2, 0, 0)74 DEF(shl_i32, 1, 2, 0, 0) 75 DEF(shr_i32, 1, 2, 0, 0) 76 DEF(sar_i32, 1, 2, 0, 0) 74 77 #ifdef TCG_TARGET_HAS_rot_i32 75 DEF 2(rotl_i32, 1, 2, 0, 0)76 DEF 2(rotr_i32, 1, 2, 0, 0)77 #endif 78 79 DEF 2(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)78 DEF(rotl_i32, 1, 2, 0, 0) 79 DEF(rotr_i32, 1, 2, 0, 0) 80 #endif 81 82 DEF(brcond_i32, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 80 83 #if TCG_TARGET_REG_BITS == 32 81 DEF2(add2_i32, 2, 4, 0, 0) 82 DEF2(sub2_i32, 2, 4, 0, 0) 83 DEF2(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 84 DEF2(mulu2_i32, 2, 2, 0, 0) 84 DEF(add2_i32, 2, 4, 0, 0) 85 DEF(sub2_i32, 2, 4, 0, 0) 86 
DEF(brcond2_i32, 0, 4, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 87 DEF(mulu2_i32, 2, 2, 0, 0) 88 DEF(setcond2_i32, 1, 4, 1, 0) 85 89 #endif 86 90 #ifdef TCG_TARGET_HAS_ext8s_i32 87 DEF 2(ext8s_i32, 1, 1, 0, 0)91 DEF(ext8s_i32, 1, 1, 0, 0) 88 92 #endif 89 93 #ifdef TCG_TARGET_HAS_ext16s_i32 90 DEF 2(ext16s_i32, 1, 1, 0, 0)94 DEF(ext16s_i32, 1, 1, 0, 0) 91 95 #endif 92 96 #ifdef TCG_TARGET_HAS_ext8u_i32 93 DEF 2(ext8u_i32, 1, 1, 0, 0)97 DEF(ext8u_i32, 1, 1, 0, 0) 94 98 #endif 95 99 #ifdef TCG_TARGET_HAS_ext16u_i32 96 DEF 2(ext16u_i32, 1, 1, 0, 0)100 DEF(ext16u_i32, 1, 1, 0, 0) 97 101 #endif 98 102 #ifdef TCG_TARGET_HAS_bswap16_i32 99 DEF 2(bswap16_i32, 1, 1, 0, 0)103 DEF(bswap16_i32, 1, 1, 0, 0) 100 104 #endif 101 105 #ifdef TCG_TARGET_HAS_bswap32_i32 102 DEF 2(bswap32_i32, 1, 1, 0, 0)106 DEF(bswap32_i32, 1, 1, 0, 0) 103 107 #endif 104 108 #ifdef TCG_TARGET_HAS_not_i32 105 DEF 2(not_i32, 1, 1, 0, 0)109 DEF(not_i32, 1, 1, 0, 0) 106 110 #endif 107 111 #ifdef TCG_TARGET_HAS_neg_i32 108 DEF2(neg_i32, 1, 1, 0, 0) 112 DEF(neg_i32, 1, 1, 0, 0) 113 #endif 114 #ifdef TCG_TARGET_HAS_andc_i32 115 DEF(andc_i32, 1, 2, 0, 0) 116 #endif 117 #ifdef TCG_TARGET_HAS_orc_i32 118 DEF(orc_i32, 1, 2, 0, 0) 119 #endif 120 #ifdef TCG_TARGET_HAS_eqv_i32 121 DEF(eqv_i32, 1, 2, 0, 0) 122 #endif 123 #ifdef TCG_TARGET_HAS_nand_i32 124 DEF(nand_i32, 1, 2, 0, 0) 125 #endif 126 #ifdef TCG_TARGET_HAS_nor_i32 127 DEF(nor_i32, 1, 2, 0, 0) 109 128 #endif 110 129 111 130 #if TCG_TARGET_REG_BITS == 64 112 DEF2(mov_i64, 1, 1, 0, 0) 113 DEF2(movi_i64, 1, 0, 1, 0) 131 DEF(mov_i64, 1, 1, 0, 0) 132 DEF(movi_i64, 1, 0, 1, 0) 133 DEF(setcond_i64, 1, 2, 1, 0) 114 134 /* load/store */ 115 DEF 2(ld8u_i64, 1, 1, 1, 0)116 DEF 2(ld8s_i64, 1, 1, 1, 0)117 DEF 2(ld16u_i64, 1, 1, 1, 0)118 DEF 2(ld16s_i64, 1, 1, 1, 0)119 DEF 2(ld32u_i64, 1, 1, 1, 0)120 DEF 2(ld32s_i64, 1, 1, 1, 0)121 DEF 2(ld_i64, 1, 1, 1, 0)122 DEF 2(st8_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)123 DEF 2(st16_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)124 DEF 2(st32_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)125 DEF 2(st_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS)135 DEF(ld8u_i64, 1, 1, 1, 0) 136 DEF(ld8s_i64, 1, 1, 1, 0) 137 DEF(ld16u_i64, 1, 1, 1, 0) 138 DEF(ld16s_i64, 1, 1, 1, 0) 139 DEF(ld32u_i64, 1, 1, 1, 0) 140 DEF(ld32s_i64, 1, 1, 1, 0) 141 DEF(ld_i64, 1, 1, 1, 0) 142 DEF(st8_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS) 143 DEF(st16_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS) 144 DEF(st32_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS) 145 DEF(st_i64, 0, 2, 1, TCG_OPF_SIDE_EFFECTS) 126 146 /* arith */ 127 DEF 2(add_i64, 1, 2, 0, 0)128 DEF 2(sub_i64, 1, 2, 0, 0)129 DEF 2(mul_i64, 1, 2, 0, 0)147 DEF(add_i64, 1, 2, 0, 0) 148 DEF(sub_i64, 1, 2, 0, 0) 149 DEF(mul_i64, 1, 2, 0, 0) 130 150 #ifdef TCG_TARGET_HAS_div_i64 131 DEF2(div_i64, 1, 2, 0, 0) 132 DEF2(divu_i64, 1, 2, 0, 0) 133 DEF2(rem_i64, 1, 2, 0, 0) 134 DEF2(remu_i64, 1, 2, 0, 0) 135 #else 136 DEF2(div2_i64, 2, 3, 0, 0) 137 DEF2(divu2_i64, 2, 3, 0, 0) 138 #endif 139 DEF2(and_i64, 1, 2, 0, 0) 140 DEF2(or_i64, 1, 2, 0, 0) 141 DEF2(xor_i64, 1, 2, 0, 0) 151 DEF(div_i64, 1, 2, 0, 0) 152 DEF(divu_i64, 1, 2, 0, 0) 153 DEF(rem_i64, 1, 2, 0, 0) 154 DEF(remu_i64, 1, 2, 0, 0) 155 #endif 156 #ifdef TCG_TARGET_HAS_div2_i64 157 DEF(div2_i64, 2, 3, 0, 0) 158 DEF(divu2_i64, 2, 3, 0, 0) 159 #endif 160 DEF(and_i64, 1, 2, 0, 0) 161 DEF(or_i64, 1, 2, 0, 0) 162 DEF(xor_i64, 1, 2, 0, 0) 142 163 /* shifts/rotates */ 143 DEF 2(shl_i64, 1, 2, 0, 0)144 DEF 2(shr_i64, 1, 2, 0, 0)145 DEF 2(sar_i64, 1, 2, 0, 0)164 DEF(shl_i64, 1, 2, 0, 0) 165 DEF(shr_i64, 1, 2, 0, 0) 166 DEF(sar_i64, 1, 2, 0, 0) 146 167 
#ifdef TCG_TARGET_HAS_rot_i64 147 DEF 2(rotl_i64, 1, 2, 0, 0)148 DEF 2(rotr_i64, 1, 2, 0, 0)149 #endif 150 151 DEF 2(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)168 DEF(rotl_i64, 1, 2, 0, 0) 169 DEF(rotr_i64, 1, 2, 0, 0) 170 #endif 171 172 DEF(brcond_i64, 0, 2, 2, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 152 173 #ifdef TCG_TARGET_HAS_ext8s_i64 153 DEF 2(ext8s_i64, 1, 1, 0, 0)174 DEF(ext8s_i64, 1, 1, 0, 0) 154 175 #endif 155 176 #ifdef TCG_TARGET_HAS_ext16s_i64 156 DEF 2(ext16s_i64, 1, 1, 0, 0)177 DEF(ext16s_i64, 1, 1, 0, 0) 157 178 #endif 158 179 #ifdef TCG_TARGET_HAS_ext32s_i64 159 DEF 2(ext32s_i64, 1, 1, 0, 0)180 DEF(ext32s_i64, 1, 1, 0, 0) 160 181 #endif 161 182 #ifdef TCG_TARGET_HAS_ext8u_i64 162 DEF 2(ext8u_i64, 1, 1, 0, 0)183 DEF(ext8u_i64, 1, 1, 0, 0) 163 184 #endif 164 185 #ifdef TCG_TARGET_HAS_ext16u_i64 165 DEF 2(ext16u_i64, 1, 1, 0, 0)186 DEF(ext16u_i64, 1, 1, 0, 0) 166 187 #endif 167 188 #ifdef TCG_TARGET_HAS_ext32u_i64 168 DEF 2(ext32u_i64, 1, 1, 0, 0)189 DEF(ext32u_i64, 1, 1, 0, 0) 169 190 #endif 170 191 #ifdef TCG_TARGET_HAS_bswap16_i64 171 DEF 2(bswap16_i64, 1, 1, 0, 0)192 DEF(bswap16_i64, 1, 1, 0, 0) 172 193 #endif 173 194 #ifdef TCG_TARGET_HAS_bswap32_i64 174 DEF 2(bswap32_i64, 1, 1, 0, 0)195 DEF(bswap32_i64, 1, 1, 0, 0) 175 196 #endif 176 197 #ifdef TCG_TARGET_HAS_bswap64_i64 177 DEF 2(bswap64_i64, 1, 1, 0, 0)198 DEF(bswap64_i64, 1, 1, 0, 0) 178 199 #endif 179 200 #ifdef TCG_TARGET_HAS_not_i64 180 DEF 2(not_i64, 1, 1, 0, 0)201 DEF(not_i64, 1, 1, 0, 0) 181 202 #endif 182 203 #ifdef TCG_TARGET_HAS_neg_i64 183 DEF2(neg_i64, 1, 1, 0, 0) 204 DEF(neg_i64, 1, 1, 0, 0) 205 #endif 206 #ifdef TCG_TARGET_HAS_andc_i64 207 DEF(andc_i64, 1, 2, 0, 0) 208 #endif 209 #ifdef TCG_TARGET_HAS_orc_i64 210 DEF(orc_i64, 1, 2, 0, 0) 211 #endif 212 #ifdef TCG_TARGET_HAS_eqv_i64 213 DEF(eqv_i64, 1, 2, 0, 0) 214 #endif 215 #ifdef TCG_TARGET_HAS_nand_i64 216 DEF(nand_i64, 1, 2, 0, 0) 217 #endif 218 #ifdef TCG_TARGET_HAS_nor_i64 219 DEF(nor_i64, 1, 2, 0, 0) 184 220 #endif 185 221 #endif … … 187 223 /* QEMU specific */ 188 224 #if TARGET_LONG_BITS > TCG_TARGET_REG_BITS 189 DEF 2(debug_insn_start, 0, 0, 2, 0)190 #else 191 DEF 2(debug_insn_start, 0, 0, 1, 0)192 #endif 193 DEF 2(exit_tb, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)194 DEF 2(goto_tb, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS)225 DEF(debug_insn_start, 0, 0, 2, 0) 226 #else 227 DEF(debug_insn_start, 0, 0, 1, 0) 228 #endif 229 DEF(exit_tb, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 230 DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END | TCG_OPF_SIDE_EFFECTS) 195 231 /* Note: even if TARGET_LONG_BITS is not defined, the INDEX_op 196 232 constants must be defined */ 197 233 #if TCG_TARGET_REG_BITS == 32 198 234 #if TARGET_LONG_BITS == 32 199 DEF2(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 200 #else 201 DEF2(qemu_ld8u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 202 #endif 203 #if TARGET_LONG_BITS == 32 204 DEF2(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 205 #else 206 DEF2(qemu_ld8s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 207 #endif 208 #if TARGET_LONG_BITS == 32 209 DEF2(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 210 #else 211 DEF2(qemu_ld16u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 212 #endif 213 #if TARGET_LONG_BITS == 32 214 DEF2(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 215 #else 216 DEF2(qemu_ld16s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 217 #endif 218 #if TARGET_LONG_BITS == 32 219 
DEF2(qemu_ld32u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 220 #else 221 DEF2(qemu_ld32u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 222 #endif 223 #if TARGET_LONG_BITS == 32 224 DEF2(qemu_ld32s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 225 #else 226 DEF2(qemu_ld32s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 227 #endif 228 #if TARGET_LONG_BITS == 32 229 DEF2(qemu_ld64, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 230 #else 231 DEF2(qemu_ld64, 2, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 232 #endif 233 234 #if TARGET_LONG_BITS == 32 235 DEF2(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 236 #else 237 DEF2(qemu_st8, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 238 #endif 239 #if TARGET_LONG_BITS == 32 240 DEF2(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 241 #else 242 DEF2(qemu_st16, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 243 #endif 244 #if TARGET_LONG_BITS == 32 245 DEF2(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 246 #else 247 DEF2(qemu_st32, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 248 #endif 249 #if TARGET_LONG_BITS == 32 250 DEF2(qemu_st64, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 251 #else 252 DEF2(qemu_st64, 0, 4, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 235 DEF(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 236 #else 237 DEF(qemu_ld8u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 238 #endif 239 #if TARGET_LONG_BITS == 32 240 DEF(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 241 #else 242 DEF(qemu_ld8s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 243 #endif 244 #if TARGET_LONG_BITS == 32 245 DEF(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 246 #else 247 DEF(qemu_ld16u, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 248 #endif 249 #if TARGET_LONG_BITS == 32 250 DEF(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 251 #else 252 DEF(qemu_ld16s, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 253 #endif 254 #if TARGET_LONG_BITS == 32 255 DEF(qemu_ld32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 256 #else 257 DEF(qemu_ld32, 1, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 258 #endif 259 #if TARGET_LONG_BITS == 32 260 DEF(qemu_ld64, 2, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 261 #else 262 DEF(qemu_ld64, 2, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 263 #endif 264 265 #if TARGET_LONG_BITS == 32 266 DEF(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 267 #else 268 DEF(qemu_st8, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 269 #endif 270 #if TARGET_LONG_BITS == 32 271 DEF(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 272 #else 273 DEF(qemu_st16, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 274 #endif 275 #if TARGET_LONG_BITS == 32 276 DEF(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 277 #else 278 DEF(qemu_st32, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 279 #endif 280 #if TARGET_LONG_BITS == 32 281 DEF(qemu_st64, 0, 3, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 282 #else 283 DEF(qemu_st64, 0, 4, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 253 284 #endif 254 285 255 286 #else /* TCG_TARGET_REG_BITS == 32 */ 256 287 257 DEF2(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 258 DEF2(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 259 DEF2(qemu_ld16u, 1, 
1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 260 DEF2(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 261 DEF2(qemu_ld32u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 262 DEF2(qemu_ld32s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 263 DEF2(qemu_ld64, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 264 265 DEF2(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 266 DEF2(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 267 DEF2(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 268 DEF2(qemu_st64, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 288 DEF(qemu_ld8u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 289 DEF(qemu_ld8s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 290 DEF(qemu_ld16u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 291 DEF(qemu_ld16s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 292 DEF(qemu_ld32, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 293 DEF(qemu_ld32u, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 294 DEF(qemu_ld32s, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 295 DEF(qemu_ld64, 1, 1, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 296 297 DEF(qemu_st8, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 298 DEF(qemu_st16, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 299 DEF(qemu_st32, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 300 DEF(qemu_st64, 0, 2, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) 269 301 270 302 #endif /* TCG_TARGET_REG_BITS != 32 */ 271 303 272 #undef DEF 2304 #undef DEF -
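With the DEF2() wrapper removed, tcg-opc.h is now a plain X-macro list: every includer supplies its own DEF(name, oargs, iargs, cargs, flags) and expands the list into whatever structure it needs, such as the opcode enum in tcg.h or the TCGOpDef table in tcg.c. The toy program below demonstrates the same pattern with an invented three-opcode list; none of its names come from the changeset.

    #include <stdio.h>

    /* Stand-in for #include "tcg-opc.h". */
    #define MY_OPC_LIST \
        DEF(end,     0, 0, 0, 0) \
        DEF(mov_i32, 1, 1, 0, 0) \
        DEF(add_i32, 1, 2, 0, 0)

    /* First expansion: an enum of opcode indexes. */
    enum {
    #define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
        MY_OPC_LIST
    #undef DEF
        NB_OPS,
    };

    /* Second expansion: a table of per-opcode operand counts. */
    static const struct { const char *name; int nb_args; } op_defs[] = {
    #define DEF(name, oargs, iargs, cargs, flags) { #name, (oargs) + (iargs) + (cargs) },
        MY_OPC_LIST
    #undef DEF
    };

    int main(void)
    {
        printf("%d ops; add_i32 takes %d operands\n",
               (int)NB_OPS, op_defs[INDEX_op_add_i32].nb_args);
        return 0;
    }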
trunk/src/recompiler/tcg/tcg-runtime.h
r37675      r37689
3      3    
4      4    /* tcg-runtime.c */
       5    int32_t tcg_helper_div_i32(int32_t arg1, int32_t arg2);
       6    int32_t tcg_helper_rem_i32(int32_t arg1, int32_t arg2);
       7    uint32_t tcg_helper_divu_i32(uint32_t arg1, uint32_t arg2);
       8    uint32_t tcg_helper_remu_i32(uint32_t arg1, uint32_t arg2);
       9    
5      10   int64_t tcg_helper_shl_i64(int64_t arg1, int64_t arg2);
6      11   int64_t tcg_helper_shr_i64(int64_t arg1, int64_t arg2);
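These four declarations back the new fallback path in tcg-op.h for hosts whose backend defines neither TCG_TARGET_HAS_div_i32 nor TCG_TARGET_HAS_div2_i32. Their definitions live in tcg-runtime.c, which is outside this excerpt; semantically they are ordinary C division and remainder, roughly as in the illustrative sketch below (the my_ prefix marks it as a stand-in, not the shipped code).

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins; like plain C, they do not special-case
       division by zero or INT32_MIN / -1. */
    static int32_t  my_helper_div_i32(int32_t a, int32_t b)    { return a / b; }
    static int32_t  my_helper_rem_i32(int32_t a, int32_t b)    { return a % b; }
    static uint32_t my_helper_divu_i32(uint32_t a, uint32_t b) { return a / b; }
    static uint32_t my_helper_remu_i32(uint32_t a, uint32_t b) { return a % b; }

    int main(void)
    {
        printf("%d %d %u %u\n",
               (int)my_helper_div_i32(-7, 2), (int)my_helper_rem_i32(-7, 2),
               (unsigned)my_helper_divu_i32(7, 2), (unsigned)my_helper_remu_i32(7, 2));
        return 0;
    }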
trunk/src/recompiler/tcg/tcg.c
r37676 r37689 28 28 #include "config.h" 29 29 30 #if ndef CONFIG_DEBUG_TCG30 #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG) 31 31 /* define it to suppress various consistency checks (faster) */ 32 32 #define NDEBUG … … 53 53 #include "cache-utils.h" 54 54 #include "host-utils.h" 55 #include "qemu-timer.h" 55 56 56 57 /* Note: the long term plan is to reduce the dependancies on the QEMU … … 72 73 * Liveness analysis doesn't work well with 32-bit hosts and 64-bit targets, 73 74 * second element of the register pair to store 64-bit value is considered 74 * dead, it seems. 75 * @todo: fix it in compiler 76 */ 75 * dead, it seems. */ 76 /** @todo re-test this */ 77 77 # if defined(TARGET_X86_64) && (TCG_TARGET_REG_BITS == 32) 78 78 # undef USE_LIVENESS_ANALYSIS … … 80 80 #endif /* VBOX */ 81 81 82 static void tcg_target_init(TCGContext *s); 83 static void tcg_target_qemu_prologue(TCGContext *s); 82 84 static void patch_reloc(uint8_t *code_ptr, int type, 83 85 tcg_target_long value, tcg_target_long addend); 84 86 85 87 static TCGOpDef tcg_op_defs[] = { 86 #define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size }, 87 #ifndef VBOX 88 #define DEF2(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, 0 }, 89 #else /* VBOX */ 90 # define DEF2(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags, 0, 0, 0 }, 91 #endif /* VBOX */ 88 #define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags }, 92 89 #include "tcg-opc.h" 93 90 #undef DEF 94 #undef DEF295 91 }; 96 92 … … 121 117 /* label relocation processing */ 122 118 123 void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,124 int label_index, long addend)119 static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type, 120 int label_index, long addend) 125 121 { 126 122 TCGLabel *l; … … 261 257 262 258 tcg_target_init(s); 263 259 } 260 261 void tcg_prologue_init(TCGContext *s) 262 { 264 263 /* init global prologue and epilogue */ 265 264 s->code_buf = code_gen_prologue; … … 575 574 int sizemask, TCGArg ret, int nargs, TCGArg *args) 576 575 { 576 #ifdef TCG_TARGET_I386 577 577 int call_type; 578 #endif 578 579 int i; 579 580 int real_args; 580 581 int nb_rets; 581 582 TCGArg *nparam; 583 584 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 585 for (i = 0; i < nargs; ++i) { 586 int is_64bit = sizemask & (1 << (i+1)*2); 587 int is_signed = sizemask & (2 << (i+1)*2); 588 if (!is_64bit) { 589 TCGv_i64 temp = tcg_temp_new_i64(); 590 TCGv_i64 orig = MAKE_TCGV_I64(args[i]); 591 if (is_signed) { 592 tcg_gen_ext32s_i64(temp, orig); 593 } else { 594 tcg_gen_ext32u_i64(temp, orig); 595 } 596 args[i] = GET_TCGV_I64(temp); 597 } 598 } 599 #endif /* TCG_TARGET_EXTEND_ARGS */ 600 582 601 *gen_opc_ptr++ = INDEX_op_call; 583 602 nparam = gen_opparam_ptr++; 603 #ifdef TCG_TARGET_I386 584 604 call_type = (flags & TCG_CALL_TYPE_MASK); 605 #endif 585 606 if (ret != TCG_CALL_DUMMY_ARG) { 586 607 #if TCG_TARGET_REG_BITS < 64 … … 606 627 for (i = 0; i < nargs; i++) { 607 628 #if TCG_TARGET_REG_BITS < 64 608 if (sizemask & (2 << i)) { 629 int is_64bit = sizemask & (1 << (i+1)*2); 630 if (is_64bit) { 609 631 #ifdef TCG_TARGET_I386 610 632 /* REGPARM case: if the third parameter is 64 bit, it is … … 622 644 } 623 645 #endif 624 #ifdef TCG_TARGET_WORDS_BIGENDIAN 646 /* If stack grows up, then we will be placing successive 647 arguments at lower addresses, which means we need to 648 reverse the order compared to how we would normally 
649 treat either big or little-endian. For those arguments 650 that will wind up in registers, this still works for 651 HPPA (the only current STACK_GROWSUP target) since the 652 argument registers are *also* allocated in decreasing 653 order. If another such target is added, this logic may 654 have to get more complicated to differentiate between 655 stack arguments and register arguments. */ 656 #if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP) 625 657 *gen_opparam_ptr++ = args[i] + 1; 626 658 *gen_opparam_ptr++ = args[i]; … … 630 662 #endif 631 663 real_args += 2; 632 } else633 #endif 634 { 635 *gen_opparam_ptr++ = args[i]; 636 real_args++;637 }664 continue; 665 } 666 #endif /* TCG_TARGET_REG_BITS < 64 */ 667 668 *gen_opparam_ptr++ = args[i]; 669 real_args++; 638 670 } 639 671 *gen_opparam_ptr++ = GET_TCGV_PTR(func); … … 645 677 /* total parameters, needed to go backward in the instruction stream */ 646 678 *gen_opparam_ptr++ = 1 + nb_rets + real_args + 3; 679 680 #if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64 681 for (i = 0; i < nargs; ++i) { 682 int is_64bit = sizemask & (1 << (i+1)*2); 683 if (!is_64bit) { 684 TCGv_i64 temp = MAKE_TCGV_I64(args[i]); 685 tcg_temp_free_i64(temp); 686 } 687 } 688 #endif /* TCG_TARGET_EXTEND_ARGS */ 647 689 } 648 690 … … 696 738 #endif 697 739 740 698 741 static void tcg_reg_alloc_start(TCGContext *s) 699 742 { … … 813 856 const TCGArg *args; 814 857 TCGArg arg; 815 int c, i, k, nb_oargs, nb_iargs, nb_cargs, first_insn; 858 TCGOpcode c; 859 int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn; 816 860 const TCGOpDef *def; 817 861 char buf[128]; … … 919 963 tcg_get_arg_str_idx(s, buf, sizeof(buf), args[k++])); 920 964 } 921 if (c == INDEX_op_brcond_i32 965 switch (c) { 966 case INDEX_op_brcond_i32: 922 967 #if TCG_TARGET_REG_BITS == 32 923 || c == INDEX_op_brcond2_i32968 case INDEX_op_brcond2_i32: 924 969 #elif TCG_TARGET_REG_BITS == 64 925 || c == INDEX_op_brcond_i64 926 #endif 927 ) { 970 case INDEX_op_brcond_i64: 971 #endif 972 case INDEX_op_setcond_i32: 973 #if TCG_TARGET_REG_BITS == 32 974 case INDEX_op_setcond2_i32: 975 #elif TCG_TARGET_REG_BITS == 64 976 case INDEX_op_setcond_i64: 977 #endif 928 978 if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) 929 979 fprintf(outfile, ",%s", cond_name[args[k++]]); … … 931 981 fprintf(outfile, ",$0x%" TCG_PRIlx, args[k++]); 932 982 i = 1; 933 }934 else983 break; 984 default: 935 985 i = 0; 986 break; 987 } 936 988 for(; i < nb_cargs; i++) { 937 989 if (k != 0) … … 992 1044 void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs) 993 1045 { 994 intop;1046 TCGOpcode op; 995 1047 TCGOpDef *def; 996 1048 const char *ct_str; … … 998 1050 999 1051 for(;;) { 1000 if (tdefs->op < 0)1052 if (tdefs->op == (TCGOpcode)-1) 1001 1053 break; 1002 1054 op = tdefs->op; 1003 1055 assert(op >= 0 && op < NB_OPS); 1004 1056 def = &tcg_op_defs[op]; 1057 #if defined(CONFIG_DEBUG_TCG) 1058 /* Duplicate entry in op definitions? */ 1059 assert(!def->used); 1060 def->used = 1; 1061 #endif 1005 1062 nb_args = def->nb_iargs + def->nb_oargs; 1006 1063 for(i = 0; i < nb_args; i++) { 1007 1064 ct_str = tdefs->args_ct_str[i]; 1065 /* Incomplete TCGTargetOpDef entry? 
*/
+            assert(ct_str != NULL);
             tcg_regset_clear(def->args_ct[i].u.regs);
             def->args_ct[i].ct = 0;
…
                     fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
                             ct_str, i, def->name);
-#ifdef VBOX
-                    tcg_exit(1);
-#else
                     exit(1);
-#endif
                 }
             }
…
             }
         }
+
+        /* TCGTargetOpDef entry with too much information? */
+        assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);
 
         /* sort the constraints (XXX: this is just an heuristic) */
…
         }
 
+#if defined(CONFIG_DEBUG_TCG)
+    i = 0;
+    for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
+        if (op < INDEX_op_call || op == INDEX_op_debug_insn_start) {
+            /* Wrong entry in op definitions? */
+            if (tcg_op_defs[op].used) {
+                fprintf(stderr, "Invalid op definition for %s\n",
+                        tcg_op_defs[op].name);
+                i = 1;
+            }
+        } else {
+            /* Missing entry in op definitions? */
+            if (!tcg_op_defs[op].used) {
+                fprintf(stderr, "Missing op definition for %s\n",
+                        tcg_op_defs[op].name);
+                i = 1;
+            }
+        }
+    }
+    if (i == 1) {
+        tcg_abort();
+    }
+#endif
 }
…
 static void tcg_liveness_analysis(TCGContext *s)
 {
-    int i, op_index, op, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
+    int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
+    TCGOpcode op;
     TCGArg *args;
     const TCGOpDef *def;
…
 #else
 /* dummy liveness analysis */
-void tcg_liveness_analysis(TCGContext *s)
+static void tcg_liveness_analysis(TCGContext *s)
 {
     int nb_ops;
…
     if (s->current_frame_offset + sizeof(tcg_target_long) > s->frame_end)
 #else
-    if ((unsigned)s->current_frame_offset + sizeof(tcg_target_long) > s->frame_end)
+    if ((tcg_target_long)s->current_frame_offset + sizeof(tcg_target_long) > s->frame_end)
 #endif
         tcg_abort();
…
     }
     if (ts->reg != reg) {
-        tcg_out_mov(s, reg, ts->reg);
+        tcg_out_mov(s, ots->type, reg, ts->reg);
     }
 }
…
 
 static void tcg_reg_alloc_op(TCGContext *s,
-                             const TCGOpDef *def, int opc,
+                             const TCGOpDef *def, TCGOpcode opc,
                              const TCGArg *args,
                              unsigned int dead_iargs)
…
            and move the temporary register into it */
         reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
-        tcg_out_mov(s, reg, ts->reg);
+        tcg_out_mov(s, ts->type, reg, ts->reg);
     }
     new_args[i] = reg;
…
         reg = new_args[i];
         if (ts->fixed_reg && ts->reg != reg) {
-            tcg_out_mov(s, ts->reg, reg);
+            tcg_out_mov(s, ts->type, ts->reg, reg);
         }
     }
…
 
 static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
-                              int opc, const TCGArg *args,
+                              TCGOpcode opc, const TCGArg *args,
                               unsigned int dead_iargs)
 {
…
         if (ts->val_type == TEMP_VAL_REG) {
             if (ts->reg != reg) {
-                tcg_out_mov(s, reg, ts->reg);
+                tcg_out_mov(s, ts->type, reg, ts->reg);
             }
         } else if (ts->val_type == TEMP_VAL_MEM) {
…
         if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
             reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
-            tcg_out_mov(s, reg, ts->reg);
+            tcg_out_mov(s, ts->type, reg, ts->reg);
         }
         func_arg = reg;
…
         if (ts->fixed_reg) {
             if (ts->reg != reg) {
-                tcg_out_mov(s, ts->reg, reg);
+                tcg_out_mov(s, ts->type, ts->reg, reg);
             }
         } else {
…
                            long search_pc)
 {
-    int opc, op_index;
+    TCGOpcode opc;
+    int op_index;
     const TCGOpDef *def;
     unsigned int dead_iargs;
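Two notes on the tcg.c changes above. The extra TCGType argument threaded through every tcg_out_mov() call tells the backend how wide the copy is, presumably so it can emit a narrower move where a full host-register copy is not needed. The new assertions in the constraint-parsing loop tighten the contract on the per-backend TCGTargetOpDef tables: an entry must supply exactly one constraint string per operand, no fewer and no more, and the CONFIG_DEBUG_TCG block cross-checks, via the new used flag, that every public opcode has such an entry while the internal ones do not. A minimal sketch of the kind of entry being policed, assuming the conventional "r" (any register) and "ri" (register or immediate) constraint letters; the opcode is real, but the entry itself is illustrative and not taken from any particular tcg backend:

/* add_i32 has one output and two inputs, so exactly three constraint
 * strings are expected.  After r37689, a missing string trips
 * assert(ct_str != NULL) and a superfluous fourth one trips the
 * "too much information" assert shown above. */
static const TCGTargetOpDef example_add_def = {
    INDEX_op_add_i32,
    { "r", "r", "ri" }
};

-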
trunk/src/recompiler/tcg/tcg.h
r37675 r37689
 #endif
 
-enum {
-#define DEF(s, n, copy_size) INDEX_op_ ## s,
+typedef enum TCGOpcode {
+#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
 #include "tcg-opc.h"
 #undef DEF
     NB_OPS,
-};
+} TCGOpcode;
 
 #define tcg_regset_clear(d) (d) = 0
…
 #define TCG_STATIC_CALL_ARGS_SIZE 128
 
-typedef int TCGType;
-
-#define TCG_TYPE_I32 0
-#define TCG_TYPE_I64 1
-#define TCG_TYPE_COUNT 2 /* number of different types */
-
+typedef enum TCGType {
+    TCG_TYPE_I32,
+    TCG_TYPE_I64,
+    TCG_TYPE_COUNT, /* number of different types */
+
+    /* An alias for the size of the host register. */
 #if TCG_TARGET_REG_BITS == 32
-#define TCG_TYPE_PTR TCG_TYPE_I32
-#else
-#define TCG_TYPE_PTR TCG_TYPE_I64
-#endif
+    TCG_TYPE_REG = TCG_TYPE_I32,
+#else
+    TCG_TYPE_REG = TCG_TYPE_I64,
+#endif
+
+    /* An alias for the size of the native pointer. We don't currently
+       support any hosts with 64-bit registers and 32-bit pointers. */
+    TCG_TYPE_PTR = TCG_TYPE_REG,
+
+    /* An alias for the size of the target "long", aka register. */
+#if TARGET_LONG_BITS == 64
+    TCG_TYPE_TL = TCG_TYPE_I64,
+#else
+    TCG_TYPE_TL = TCG_TYPE_I32,
+#endif
+} TCGType;
 
 typedef tcg_target_ulong TCGArg;
…
     TCG_COND_GTU,
 } TCGCond;
+
+/* Invert the sense of the comparison. */
+static inline TCGCond tcg_invert_cond(TCGCond c)
+{
+    return (TCGCond)(c ^ 1);
+}
+
+/* Swap the operands in a comparison. */
+static inline TCGCond tcg_swap_cond(TCGCond c)
+{
+    int mask = (c < TCG_COND_LT ? 0 : c < TCG_COND_LTU ? 7 : 15);
+    return (TCGCond)(c ^ mask);
+}
+
+static inline TCGCond tcg_unsigned_cond(TCGCond c)
+{
+    return (c >= TCG_COND_LT && c <= TCG_COND_GT ? c + 4 : c);
+}
 
 #define TEMP_VAL_DEAD 0
…
 
 void tcg_context_init(TCGContext *s);
+void tcg_prologue_init(TCGContext *s);
 void tcg_func_start(TCGContext *s);
…
     uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
     uint8_t flags;
-    uint16_t copy_size;
     TCGArgConstraint *args_ct;
     int *sorted_args;
+#if defined(CONFIG_DEBUG_TCG)
+    int used;
+#endif
 } TCGOpDef;
 
 typedef struct TCGTargetOpDef {
-    int op;
+    TCGOpcode op;
     const char *args_ct_str[TCG_MAX_OP_ARGS];
 } TCGTargetOpDef;
-
-void tcg_target_init(TCGContext *s);
-void tcg_target_qemu_prologue(TCGContext *s);
 
 #ifndef VBOX
…
 TCGv_i64 tcg_const_local_i64(int64_t val);
 
-void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
-                   int label_index, long addend);
-
 #ifndef VBOX
 extern uint8_t code_gen_prologue[];
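The three new TCGCond helpers lean on how the condition codes are laid out: the low bit separates a predicate from its negation, the signed comparisons form one block of four followed by the unsigned block, and XOR-ing with the right mask within a block exchanges "less" and "greater". The sketch below is a self-contained replica for illustration only, assuming the usual ordering of the enum (EQ, NE, LT, GE, LE, GT, then LTU, GEU, LEU, GTU); the authoritative definitions are the ones in the diff above.

#include <assert.h>

/* Assumed ordering -- replicated here only so the example stands alone. */
typedef enum {
    TCG_COND_EQ, TCG_COND_NE,
    TCG_COND_LT, TCG_COND_GE, TCG_COND_LE, TCG_COND_GT,      /* signed   */
    TCG_COND_LTU, TCG_COND_GEU, TCG_COND_LEU, TCG_COND_GTU,  /* unsigned */
} TCGCond;

static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);            /* EQ<->NE, LT<->GE, LE<->GT, ... */
}

static inline TCGCond tcg_swap_cond(TCGCond c)
{
    int mask = (c < TCG_COND_LT ? 0 : c < TCG_COND_LTU ? 7 : 15);
    return (TCGCond)(c ^ mask);         /* "a COND b" becomes "b COND' a" */
}

static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
    /* shift a signed comparison four slots up into its unsigned twin */
    return (c >= TCG_COND_LT && c <= TCG_COND_GT ? c + 4 : c);
}

int main(void)
{
    assert(tcg_invert_cond(TCG_COND_LT)   == TCG_COND_GE);  /* !(a < b) == a >= b */
    assert(tcg_swap_cond(TCG_COND_LT)     == TCG_COND_GT);  /*  a < b  <=>  b > a */
    assert(tcg_swap_cond(TCG_COND_LEU)    == TCG_COND_GEU);
    assert(tcg_swap_cond(TCG_COND_EQ)     == TCG_COND_EQ);  /* symmetric predicates */
    assert(tcg_unsigned_cond(TCG_COND_GT) == TCG_COND_GTU);
    return 0;
}

-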
trunk/src/recompiler/tests/Makefile
r36175 r37689
 -include ../config-host.mak
-VPATH=$(SRC_PATH)/tests
+
+$(call set-vpath, $(SRC_PATH)/tests)
 
 CFLAGS=-Wall -O2 -g -fno-strict-aliasing
-
trunk/src/recompiler/tests/qruncom.c
r36175 r37689
 #define COM_BASE_ADDR 0x10100
 
-void usage(void)
+static void usage(void)
 {
     printf("qruncom version 0.1 (c) 2003 Fabrice Bellard\n"
-
trunk/src/recompiler/tests/runcom.c
r1 r37689
 #define COM_BASE_ADDR 0x10100
 
-void usage(void)
+static void usage(void)
 {
     printf("runcom version 0.1 (c) 2003 Fabrice Bellard\n"
…
 void dump_regs(struct vm86_regs *r)
 {
-    fprintf(stderr, 
+    fprintf(stderr,
             "EAX=%08lx EBX=%08lx ECX=%08lx EDX=%08lx\n"
             "ESI=%08lx EDI=%08lx EBP=%08lx ESP=%08lx\n"
…
         usage();
     filename = argv[1];
-    
-    vm86_mem = mmap((void *)0x00000000, 0x110000, 
-                    PROT_WRITE | PROT_READ | PROT_EXEC, 
+
+    vm86_mem = mmap((void *)0x00000000, 0x110000,
+                    PROT_WRITE | PROT_READ | PROT_EXEC,
                     MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
     if (vm86_mem == MAP_FAILED) {
…
 {
     int int_num, ah;
-    
+
     int_num = VM86_ARG(ret);
     if (int_num != 0x21)
-
trunk/src/recompiler/tests/sha1.c
r37675 r37689
 #include <stdio.h>
 #include <string.h>
-#include <sys/types.h> /* for u_int*_t */
+#include <stdint.h>
 
…
 
 typedef struct {
-    u_int32_t state[5];
-    u_int32_t count[2];
+    uint32_t state[5];
+    uint32_t count[2];
     unsigned char buffer[64];
 } SHA1_CTX;
 
-void SHA1Transform(u_int32_t state[5], const unsigned char buffer[64]);
+void SHA1Transform(uint32_t state[5], const unsigned char buffer[64]);
 void SHA1Init(SHA1_CTX* context);
-void SHA1Update(SHA1_CTX* context, const unsigned char* data, u_int32_t len);
+void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len);
 void SHA1Final(unsigned char digest[20], SHA1_CTX* context);
 /* ================ end of sha1.h ================ */
…
 /* Hash a single 512-bit block. This is the core of the algorithm. */
 
-void SHA1Transform(u_int32_t state[5], const unsigned char buffer[64])
-{
-    u_int32_t a, b, c, d, e;
+void SHA1Transform(uint32_t state[5], const unsigned char buffer[64])
+{
+    uint32_t a, b, c, d, e;
     typedef union {
         unsigned char c[64];
-        u_int32_t l[16];
+        uint32_t l[16];
     } CHAR64LONG16;
 #ifdef SHA1HANDSOFF
…
 /* Run your data through this. */
 
-void SHA1Update(SHA1_CTX* context, const unsigned char* data, u_int32_t len)
-{
-    u_int32_t i;
-    u_int32_t j;
+void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len)
+{
+    uint32_t i;
+    uint32_t j;
 
     j = context->count[0];
…
     for (i = 0; i < 2; i++)
     {
-        u_int32_t t = context->count[i];
+        uint32_t t = context->count[i];
         int j;
 
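The sha1.c change is a portability cleanup: u_int32_t is a BSD-style name pulled in through <sys/types.h> and is not available everywhere, while uint32_t from <stdint.h> is the standard C99 exact-width type. A tiny standalone illustration of the standard spelling, with an arbitrary value and the matching format macro from <inttypes.h>:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint32_t count[2] = { 0u, 0x200u };               /* exactly 32 bits, guaranteed by C99 */
    printf("high word = 0x%" PRIx32 "\n", count[1]);  /* portable printf format macro */
    return 0;
}

-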
trunk/src/recompiler/translate-all.c
r36175 r37689
 #include "disas.h"
 #include "tcg.h"
+#include "qemu-timer.h"
 
 /* code generation context */
…
 uint16_t gen_opc_icount[OPC_BUF_SIZE];
 uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
-#if defined(TARGET_I386)
-uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
-#elif defined(TARGET_SPARC)
-target_ulong gen_opc_npc[OPC_BUF_SIZE];
-target_ulong gen_opc_jump_pc[2];
-#elif defined(TARGET_MIPS) || defined(TARGET_SH4)
-uint32_t gen_opc_hflags[OPC_BUF_SIZE];
-#endif
-
-/* XXX: suppress that */
-unsigned long code_gen_max_block_size(void)
-{
-#ifdef VBOX
-    /* Just to suppress a lot of dummy warnings */
-    static long max;
-#else
-    static unsigned long max;
-#endif
-
-    if (max == 0) {
-        max = TCG_MAX_OP_SIZE;
-#define DEF(s, n, copy_size) max = copy_size > max? copy_size : max;
-#include "tcg-opc.h"
-#undef DEF
-        max *= OPC_MAX_SIZE;
-    }
-
-    return max;
-}
 
 void cpu_gen_init(void)
…
     s->tb_jmp_offset = tb->tb_jmp_offset;
     s->tb_next = NULL;
-    /* the following two entries are optional (only used for string ops) */
-    /* XXX: not used ? */
-    tb->tb_jmp_offset[2] = 0xffff;
-    tb->tb_jmp_offset[3] = 0xffff;
 #else
     s->tb_jmp_offset = NULL;
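The removed code_gen_max_block_size() relied on the same DEF()/#include expansion trick that tcg.h uses to build the TCGOpcode enum; with the copy_size field dropped from DEF() in this sync there is nothing left for it to compute, so it goes away. For readers unfamiliar with the pattern, here is a self-contained miniature of that X-macro technique; the opcode names and argument counts are invented for the example, and the single OP_LIST macro stands in for the separate tcg-opc.h header:

#include <stdio.h>

#define OP_LIST(DEF) \
    DEF(mov_i32,    1, 1, 0, 0) \
    DEF(add_i32,    1, 2, 0, 0) \
    DEF(brcond_i32, 0, 2, 2, 0)

/* Expansion 1: an enum of opcode indexes, as tcg.h now does for TCGOpcode. */
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
typedef enum { OP_LIST(DEF) NB_OPS } MiniOpcode;
#undef DEF

/* Expansion 2: a parallel table of per-opcode argument counts. */
#define DEF(name, oargs, iargs, cargs, flags) { #name, oargs, iargs, cargs },
static const struct { const char *name; int oargs, iargs, cargs; } mini_defs[] = {
    OP_LIST(DEF)
};
#undef DEF

int main(void)
{
    printf("%d opcodes; '%s' takes %d input(s)\n",
           (int)NB_OPS, mini_defs[INDEX_op_add_i32].name,
           mini_defs[INDEX_op_add_i32].iargs);
    return 0;
}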