Changeset 15416 in vbox for trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
- Timestamp:
- 2008-12-13 05:31:06 AM (16 years ago)
- Files:
- 1 edited
Legend:
- unmodified (context, prefixed with a space)
- added (prefixed with "+")
- removed (prefixed with "-")
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm
--- trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r14978)
+++ trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r15416)

 %endif
 
+
+;*******************************************************************************
+;* External Symbols                                                            *
+;*******************************************************************************
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+extern NAME(SUPR0AbsIs64bit)
+extern NAME(SUPR0Abs64bitKernelCS)
+extern NAME(SUPR0Abs64bitKernelSS)
+extern NAME(SUPR0Abs64bitKernelDS)
+extern NAME(SUPR0AbsKernelCS)
+%endif
+
+
+;*******************************************************************************
+;* Global Variables                                                            *
+;*******************************************************************************
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+BEGINDATA
+;;
+; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
+; needing to clobber a register. (This trick doesn't quite work for PE btw.
+; but that's not relevant atm.)
+GLOBALNAME g_fCPUMIs64bitHost
+    dd      NAME(SUPR0AbsIs64bit)
+%endif
+
+
 BEGINCODE
 
…
 ;
 align 16
-BEGINPROC CPUMR0SaveGuestRestoreHostFPUState
+BEGINPROC cpumR0SaveGuestRestoreHostFPUState
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS
…
     ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
     test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
-    jz      short gth_fpu_no
+    jz      short .fpu_not_used
 
     mov     xAX, cr0
…
     mov     cr0, xAX
 
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     fxsave  [xDX + CPUMCPU.Guest.fpu]
     fxrstor [xDX + CPUMCPU.Host.fpu]
 
+.done:
     mov     cr0, xCX                    ; and restore old CR0 again
     and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
-gth_fpu_no:
+.fpu_not_used:
     xor     eax, eax
     ret
-ENDPROC CPUMR0SaveGuestRestoreHostFPUState
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxsave  [rdx + CPUMCPU.Guest.fpu]
+    fxrstor [rdx + CPUMCPU.Host.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
+ENDPROC cpumR0SaveGuestRestoreHostFPUState
 
 ;;
…
 ;
 align 16
-BEGINPROC CPUMR0RestoreHostFPUState
+BEGINPROC cpumR0RestoreHostFPUState
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS
…
     ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
     test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
-    jz      short gth_fpu_no_2
+    jz      short .fpu_not_used
 
     mov     xAX, cr0
…
     mov     cr0, xAX
 
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     fxrstor [xDX + CPUMCPU.Host.fpu]
 
+.done:
     mov     cr0, xCX                    ; and restore old CR0 again
     and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
-gth_fpu_no_2:
+.fpu_not_used:
     xor     eax, eax
     ret
-ENDPROC CPUMR0RestoreHostFPUState
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxrstor [rdx + CPUMCPU.Host.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
+ENDPROC cpumR0RestoreHostFPUState
+
 
 ;;
…
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     fxrstor [xDX + CPUMCTX.fpu]
-    ret
+.done:
+    ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxrstor [rdx + CPUMCTX.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
 ENDPROC CPUMLoadFPU
 
…
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     fxsave  [xDX + CPUMCTX.fpu]
-    ret
+.done:
+    ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxsave  [rdx + CPUMCTX.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
 ENDPROC CPUMSaveFPU
 
…
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
     movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
…
 %ifdef RT_ARCH_AMD64
     test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-    jz      CPUMLoadXMM_done
+    jz      .done
 
     movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
…
     movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
     movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
-CPUMLoadXMM_done:
-%endif
-
-    ret
+%endif
+.done:
+
+    ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+
+    movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
+    movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
+    movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
+    movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
+    movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
+    movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
+    movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
+    movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]
+
+    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
+    jz      .sixtyfourbit_done
+
+    movdqa  xmm8, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
+    movdqa  xmm9, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
+    movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
+    movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
+    movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
+    movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
+    movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
+    movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
+.sixtyfourbit_done:
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
 ENDPROC CPUMLoadXMM
 
…
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
…
 %ifdef RT_ARCH_AMD64
     test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-    jz      CPUMSaveXMM_done
+    jz      .done
 
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
…
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
 
-CPUMSaveXMM_done:
-%endif
-    ret
+%endif
+.done:
+    ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7
+
+    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
+    jz      .sixtyfourbit_done
+
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
+
+.sixtyfourbit_done:
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
+
 ENDPROC CPUMSaveXMM
 
…
 ; @param  u16FCW   x86:[esp+4]  GCC:rdi  MSC:rcx  New FPU control word
 align 16
-BEGINPROC CPUMR0SetFCW
+BEGINPROC cpumR0SetFCW
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS
…
     pop     xAX
     ret
-ENDPROC CPUMR0SetFCW
+ENDPROC cpumR0SetFCW
 
 
…
 ;
 align 16
-BEGINPROC CPUMR0GetFCW
+BEGINPROC cpumR0GetFCW
     fnstcw  [xSP - 8]
     mov     ax, word [xSP - 8]
     ret
-ENDPROC CPUMR0GetFCW
+ENDPROC cpumR0GetFCW
 
 
…
 ; @param  u32MXCSR   x86:[esp+4]  GCC:rdi  MSC:rcx  New MXCSR
 align 16
-BEGINPROC CPUMR0SetMXCSR
+BEGINPROC cpumR0SetMXCSR
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS
…
     pop     xAX
     ret
-ENDPROC CPUMR0SetMXCSR
+ENDPROC cpumR0SetMXCSR
 
 
…
 ;
 align 16
-BEGINPROC CPUMR0GetMXCSR
+BEGINPROC cpumR0GetMXCSR
     stmxcsr [xSP - 8]
     mov     eax, dword [xSP - 8]
     ret
-ENDPROC CPUMR0GetMXCSR
+ENDPROC cpumR0GetMXCSR
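The recurring addition in this diff is the hybrid 32-bit kernel detour: when the 32-bit ring-0 code finds it is running on a 64-bit host kernel (flagged by g_fCPUMIs64bitHost), it far-jumps into a 64-bit code segment, performs the fxsave/fxrstor or movdqa work there so the full 64-bit state (including xmm8-xmm15) is reachable, and far-jumps back through a 16:32 pointer stored at .fpret. Below is a minimal standalone sketch of that control flow; SEL_CS64, SEL_CS32, and is64bitHost are hypothetical stand-ins for the SUPR0Abs64bitKernelCS, SUPR0AbsKernelCS, and g_fCPUMIs64bitHost absolutes, and this is an illustration of the pattern, not VirtualBox code.

    ; Minimal sketch (illustration only, NASM syntax) of a 32-bit routine
    ; that detours through a 64-bit code segment and returns.
    %define SEL_CS64 0x10               ; hypothetical 64-bit kernel CS selector
    %define SEL_CS32 0x08               ; hypothetical 32-bit kernel CS selector

    BITS 32
    do_work:
            cmp     byte [is64bitHost], 0
            jz      .legacy_mode        ; plain 32-bit host: stay in 32-bit mode
            ; Mirroring the changeset, the far jmp is hand-assembled as
            ; opcode 0xEA plus a 16:32 far pointer (4-byte offset, 2-byte
            ; selector), since in the real code the selector is an absolute
            ; symbol only known at load time.
            db      0xea                ; jmp far SEL_CS64:.in_64bit_mode
            dd      .in_64bit_mode
            dw      SEL_CS64
    .legacy_mode:
            ; ... 32-bit flavour of the work goes here ...
    .done:
            ret

    align 16
    BITS 64
    .in_64bit_mode:
            and     edx, 0ffffffffh     ; zero-extend the 32-bit pointer argument
            ; ... 64-bit flavour of the work goes here (xmm8-xmm15 reachable) ...
            jmp far [.fpret wrt rip]    ; indirect far jmp back into the 32-bit CS
    .fpret:                             ; 16:32 far pointer: offset, then selector
            dd      .done
            dw      SEL_CS32
    BITS 32

    is64bitHost:
            db      0                   ; nonzero when the host kernel is 64-bit

Note that the bytes following the hand-assembled far pointer are never executed: the 64-bit path jumps away unconditionally, and the legacy path branches over the whole sequence, which is why the real code can afford to emit the selector with a dd.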