Changeset 15416 in vbox
- Timestamp: 2008-12-13 05:31:06 AM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
Legend:
- Unmodified (diff lines below prefixed with a space)
- Added (prefixed with +)
- Removed (prefixed with -)
trunk/src/VBox/VMM/CPUMInternal.h (r15414 → r15416)

@@ -377,17 +377,17 @@
 __BEGIN_DECLS
 
-DECLASM(int)      CPUMHandleLazyFPUAsm(PCPUMCPU pCPUM);
+DECLASM(int)      cpumHandleLazyFPUAsm(PCPUMCPU pCPUM);
 
 #ifdef IN_RING0
-DECLASM(int)      CPUMR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
-DECLASM(int)      CPUMR0RestoreHostFPUState(PCPUMCPU pCPUM);
-DECLASM(void)     CPUMR0LoadFPU(PCPUMCTX pCtx);
-DECLASM(void)     CPUMR0SaveFPU(PCPUMCTX pCtx);
-DECLASM(void)     CPUMR0LoadXMM(PCPUMCTX pCtx);
-DECLASM(void)     CPUMR0SaveXMM(PCPUMCTX pCtx);
-DECLASM(void)     CPUMR0SetFCW(uint16_t u16FCW);
-DECLASM(uint16_t) CPUMR0GetFCW();
-DECLASM(void)     CPUMR0SetMXCSR(uint32_t u32MXCSR);
-DECLASM(uint32_t) CPUMR0GetMXCSR();
+DECLASM(int)      cpumR0SaveGuestRestoreHostFPUState(PCPUMCPU pCPUM);
+DECLASM(int)      cpumR0RestoreHostFPUState(PCPUMCPU pCPUM);
+DECLASM(void)     cpumR0LoadFPU(PCPUMCTX pCtx);
+DECLASM(void)     cpumR0SaveFPU(PCPUMCTX pCtx);
+DECLASM(void)     cpumR0LoadXMM(PCPUMCTX pCtx);
+DECLASM(void)     cpumR0SaveXMM(PCPUMCTX pCtx);
+DECLASM(void)     cpumR0SetFCW(uint16_t u16FCW);
+DECLASM(uint16_t) cpumR0GetFCW(void);
+DECLASM(void)     cpumR0SetMXCSR(uint32_t u32MXCSR);
+DECLASM(uint32_t) cpumR0GetMXCSR(void);
 #endif
 
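Two things change in these prototypes: the internal assembly helpers drop from the uppercase CPUM/CPUMR0 prefix used for public APIs to the lowercase cpum/cpumR0 prefix reserved for internal functions, and the two parameterless getters gain an explicit (void). The latter is a genuine C fix: a declaration with an empty parameter list leaves the argument types unspecified (pre-C23), so the compiler checks nothing at the call site. A minimal sketch of the difference, with hypothetical function names (not VBox code):

    #include <stdio.h>

    /* Empty parentheses: parameters are unspecified (pre-C23), so the
       compiler accepts any argument list without complaint. */
    unsigned short GetFCWLoose();

    /* Explicit (void): the function takes no arguments, and passing any
       is a compile-time error. */
    unsigned short GetFCWStrict(void);

    unsigned short GetFCWLoose()      { return 0x037F; } /* x87 default FCW after FNINIT */
    unsigned short GetFCWStrict(void) { return 0x037F; }

    int main(void)
    {
        printf("%#x\n", GetFCWLoose(1, 2, 3)); /* accepted, silently wrong */
        /* GetFCWStrict(1, 2, 3); */           /* would be rejected */
        printf("%#x\n", GetFCWStrict());
        return 0;
    }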
trunk/src/VBox/VMM/VMMAll/CPUMAllA.asm (r14870 → r15416)

@@ -59,5 +59,5 @@
 ;
 align 16
-BEGINPROC CPUMHandleLazyFPUAsm
+BEGINPROC cpumHandleLazyFPUAsm
 ;
 ; Figure out what to do.

@@ -199,5 +199,5 @@
     mov     eax, VINF_EM_RAW_GUEST_TRAP
     ret
-ENDPROC CPUMHandleLazyFPUAsm
+ENDPROC cpumHandleLazyFPUAsm
 
 
trunk/src/VBox/VMM/VMMAll/CPUMAllRegs.cpp (r15390 → r15416)

@@ -1517,5 +1517,5 @@ (whitespace-only change on the blank line)
             break;
         }
-
+
         default:
            AssertMsgFailed(("enmFeature=%d\n", enmFeature));

@@ -2083,5 +2083,5 @@
 VMMDECL(int) CPUMHandleLazyFPU(PVM pVM, PVMCPU pVCpu)
 {
-    return CPUMHandleLazyFPUAsm(&pVCpu->cpum.s);
+    return cpumHandleLazyFPUAsm(&pVCpu->cpum.s);
 }
 
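The second hunk shows the layering this changeset enforces: CPUMHandleLazyFPU keeps its uppercase prefix because it is the public VMM entry point, while the assembly worker it forwards to is renamed to the lowercase internal form. A toy C sketch of that public-wrapper/internal-worker split (hypothetical names; the real worker lives in a separate .asm file rather than being static):

    #include <stdio.h>

    /* Internal worker: lowercase module prefix; made static here to
       mimic "not part of the public API". */
    static int cpumHandleLazyWork(int iVCpu)
    {
        return iVCpu; /* stand-in for the real lazy-FPU switch */
    }

    /* Public API: uppercase module prefix, a thin forwarding wrapper. */
    int CPUMHandleLazyWork(int iVCpu)
    {
        return cpumHandleLazyWork(iVCpu);
    }

    int main(void)
    {
        printf("%d\n", CPUMHandleLazyWork(0));
        return 0;
    }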
trunk/src/VBox/VMM/VMMGC/TRPMGCHandlersA.asm (r11473 → r15416)

@@ -39,5 +39,5 @@
 extern IMPNAME(g_VM)                ; are a bit confusing at first... :-)
 extern NAME(CPUMGCRestoreInt)
-extern NAME(CPUMHandleLazyFPUAsm)
+extern NAME(cpumHandleLazyFPUAsm)
 extern NAME(CPUMHyperSetCtxCore)
 extern NAME(trpmGCTrapInGeneric)
trunk/src/VBox/VMM/VMMR0/CPUMR0.cpp (r15414 → r15416)

@@ -178,5 +178,5 @@
 {
 #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
-    uint64_t oldMsrEFERHost;
+    uint64_t oldMsrEFERHost = 0;
     uint32_t oldCR0 = ASMGetCR0();
 

@@ -217,5 +217,5 @@
     pVCpu->cpum.s.Host.fpu.MXCSR = CPUMGetMXCSR();
 
-    CPUMR0LoadFPU(pCtx);
+    cpumR0LoadFPU(pCtx);
 
     /*

@@ -232,5 +232,5 @@
     {
         /* fxrstor doesn't restore the XMM state! */
-        CPUMR0LoadXMM(pCtx);
+        cpumR0LoadXMM(pCtx);
         pVCpu->cpum.s.fUseFlags |= CPUM_MANUAL_XMM_RESTORE;
     }

@@ -264,5 +264,5 @@
         HWACCMR0SaveFPUState(pVM, pVCpu, pCtx);
 
-        CPUMR0RestoreHostFPUState(&pVCpu->cpum.s);
+        cpumR0RestoreHostFPUState(&pVCpu->cpum.s);
     }
     else

@@ -270,5 +270,5 @@
     {
 #ifndef CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE
-        uint64_t oldMsrEFERHost;
+        uint64_t oldMsrEFERHost = 0;
 
         /* Clear MSR_K6_EFER_FFXSR or else we'll be unable to save/restore the XMM state with fxsave/fxrstor. */

@@ -278,5 +278,5 @@
             ASMWrMsr(MSR_K6_EFER, oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
         }
-        CPUMR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
+        cpumR0SaveGuestRestoreHostFPUState(&pVCpu->cpum.s);
 
         /* Restore EFER MSR */

@@ -285,9 +285,9 @@
 
 #else  /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
-        CPUMR0SaveFPU(pCtx);
+        cpumR0SaveFPU(pCtx);
         if (pVCpu->cpum.s.fUseFlags & CPUM_MANUAL_XMM_RESTORE)
         {
             /* fxsave doesn't save the XMM state! */
-            CPUMR0SaveXMM(pCtx);
+            cpumR0SaveXMM(pCtx);
         }
 

@@ -296,7 +296,7 @@
          * We don't want the guest to be able to trigger floating point/SSE exceptions on the host.
          */
-        CPUMR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
+        cpumR0SetFCW(pVCpu->cpum.s.Host.fpu.FCW);
         if (pVM->cpum.s.CPUFeatures.edx.u1SSE)
-            CPUMR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
+            cpumR0SetMXCSR(pVCpu->cpum.s.Host.fpu.MXCSR);
 #endif /* CPUM_CAN_HANDLE_NM_TRAPS_IN_KERNEL_MODE */
     }
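Besides the renames, this file picks up the one behavioral tweak of the changeset: both oldMsrEFERHost locals are now seeded with 0. The variable is written only when the code has to clear MSR_K6_EFER_FFXSR around fxsave/fxrstor and read back under the same condition, so the old code never actually consumed garbage; but a conditionally assigned local is exactly what maybe-uninitialized warnings and future refactors trip over. A reduced sketch of the pattern, with hypothetical user-space stand-ins for the ring-0 MSR accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_K6_EFER_FFXSR UINT64_C(0x4000)      /* EFER bit 14 */

    /* Stand-ins for ASMRdMsr/ASMWrMsr (hypothetical, user-space model). */
    static uint64_t g_uEfer = MSR_K6_EFER_FFXSR | 0x0500;
    static uint64_t rdmsrEfer(void)       { return g_uEfer; }
    static void     wrmsrEfer(uint64_t u) { g_uEfer = u; }

    static void saveGuestFpu(int fFFXSR)
    {
        uint64_t oldMsrEFERHost = 0;    /* r15416: never indeterminate */
        if (fFFXSR)
        {
            oldMsrEFERHost = rdmsrEfer();
            /* fxsave/fxrstor must run with FFXSR disabled */
            wrmsrEfer(oldMsrEFERHost & ~MSR_K6_EFER_FFXSR);
        }

        /* ... the fxsave/fxrstor work would happen here ... */

        if (fFFXSR)
            wrmsrEfer(oldMsrEFERHost);  /* restore EFER exactly as found */
    }

    int main(void)
    {
        saveGuestFpu(1);
        printf("EFER restored: %#llx\n", (unsigned long long)g_uEfer);
        return 0;
    }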
trunk/src/VBox/VMM/VMMR0/CPUMR0A.asm (r14978 → r15416)

@@ -35,4 +35,31 @@
 %endif
 
+
+;*******************************************************************************
+;* External Symbols                                                            *
+;*******************************************************************************
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+extern NAME(SUPR0AbsIs64bit)
+extern NAME(SUPR0Abs64bitKernelCS)
+extern NAME(SUPR0Abs64bitKernelSS)
+extern NAME(SUPR0Abs64bitKernelDS)
+extern NAME(SUPR0AbsKernelCS)
+%endif
+
+
+;*******************************************************************************
+;* Global Variables                                                            *
+;*******************************************************************************
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+BEGINDATA
+;;
+; Store the SUPR0AbsIs64bit absolute value here so we can cmp/test without
+; needing to clobber a register. (This trick doesn't quite work for PE btw.
+; but that's not relevant atm.)
+GLOBALNAME g_fCPUMIs64bitHost
+    dd      NAME(SUPR0AbsIs64bit)
+%endif
+
+
 BEGINCODE
 

@@ -45,5 +72,5 @@
 ;
 align 16
-BEGINPROC CPUMR0SaveGuestRestoreHostFPUState
+BEGINPROC cpumR0SaveGuestRestoreHostFPUState
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS

@@ -59,5 +86,5 @@
 ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
     test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
-    jz short gth_fpu_no
+    jz short .fpu_not_used
 
     mov     xAX, cr0

@@ -66,11 +93,33 @@
     mov     cr0, xAX
 
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     fxsave  [xDX + CPUMCPU.Guest.fpu]
     fxrstor [xDX + CPUMCPU.Host.fpu]
 
+.done:
     mov     cr0, xCX                    ; and restore old CR0 again
     and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
-gth_fpu_no:
+.fpu_not_used:
     xor     eax, eax
     ret
-ENDPROC CPUMR0SaveGuestRestoreHostFPUState
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxsave  [rdx + CPUMCPU.Guest.fpu]
+    fxrstor [rdx + CPUMCPU.Host.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
+ENDPROC cpumR0SaveGuestRestoreHostFPUState

@@ -83,5 +132,5 @@
 ;
 align 16
-BEGINPROC CPUMR0RestoreHostFPUState
+BEGINPROC cpumR0RestoreHostFPUState
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS

@@ -97,5 +146,5 @@
 ; Using fxrstor should ensure that we're not causing unwanted exception on the host.
     test    dword [xDX + CPUMCPU.fUseFlags], CPUM_USED_FPU
-    jz short gth_fpu_no_2
+    jz short .fpu_not_used
 
     mov     xAX, cr0

@@ -104,10 +153,31 @@
     mov     cr0, xAX
 
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     fxrstor [xDX + CPUMCPU.Host.fpu]
 
+.done:
     mov     cr0, xCX                    ; and restore old CR0 again
     and     dword [xDX + CPUMCPU.fUseFlags], ~CPUM_USED_FPU
-gth_fpu_no_2:
+.fpu_not_used:
     xor     eax, eax
     ret
-ENDPROC CPUMR0RestoreHostFPUState
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxrstor [rdx + CPUMCPU.Host.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
+ENDPROC cpumR0RestoreHostFPUState

@@ -129,5 +200,26 @@
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     fxrstor [xDX + CPUMCTX.fpu]
-    ret
+.done:
+    ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxrstor [rdx + CPUMCTX.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
 ENDPROC CPUMLoadFPU

@@ -150,5 +242,25 @@
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
     fxsave  [xDX + CPUMCTX.fpu]
-    ret
+.done:
+    ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+    fxsave  [rdx + CPUMCTX.fpu]
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
 ENDPROC CPUMSaveFPU

@@ -171,4 +283,12 @@
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     movdqa  xmm0, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
     movdqa  xmm1, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]

@@ -182,5 +302,5 @@
 %ifdef RT_ARCH_AMD64
     test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-    jz      CPUMLoadXMM_done
+    jz      .done
 
     movdqa  xmm8, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]

@@ -192,7 +312,40 @@
     movdqa  xmm14, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
     movdqa  xmm15, [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
-CPUMLoadXMM_done:
 %endif
+.done:
 
     ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+
+    movdqa  xmm0, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0]
+    movdqa  xmm1, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1]
+    movdqa  xmm2, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2]
+    movdqa  xmm3, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3]
+    movdqa  xmm4, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4]
+    movdqa  xmm5, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5]
+    movdqa  xmm6, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6]
+    movdqa  xmm7, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7]
+
+    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
+    jz      .sixtyfourbit_done
+
+    movdqa  xmm8,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8]
+    movdqa  xmm9,  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9]
+    movdqa  xmm10, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10]
+    movdqa  xmm11, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11]
+    movdqa  xmm12, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12]
+    movdqa  xmm13, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13]
+    movdqa  xmm14, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14]
+    movdqa  xmm15, [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15]
+.sixtyfourbit_done:
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
 ENDPROC CPUMLoadXMM

@@ -215,4 +368,12 @@
     mov     xDX, dword [esp + 4]
 %endif
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
+    cmp     byte [NAME(g_fCPUMIs64bitHost)], 0
+    jz      .legacy_mode
+    db      0xea                        ; jmp far .sixtyfourbit_mode
+    dd      .sixtyfourbit_mode, NAME(SUPR0Abs64bitKernelCS)
+.legacy_mode:
+%endif ; VBOX_WITH_HYBRID_32BIT_KERNEL
+
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1

@@ -226,5 +387,5 @@
 %ifdef RT_ARCH_AMD64
     test    qword [xDX + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
-    jz      CPUMSaveXMM_done
+    jz      .done
 
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8

@@ -237,6 +398,41 @@
     movdqa  [xDX + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
 
-CPUMSaveXMM_done:
 %endif
+.done:
     ret
+
+%ifdef VBOX_WITH_HYBRID_32BIT_KERNEL_IN_R0
+ALIGNCODE(16)
+BITS 64
+.sixtyfourbit_mode:
+    and     edx, 0ffffffffh
+
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*0], xmm0
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*1], xmm1
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*2], xmm2
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*3], xmm3
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*4], xmm4
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*5], xmm5
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*6], xmm6
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*7], xmm7
+
+    test    qword [rdx + CPUMCTX.msrEFER], MSR_K6_EFER_LMA
+    jz      .sixtyfourbit_done
+
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*8], xmm8
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*9], xmm9
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*10], xmm10
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*11], xmm11
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*12], xmm12
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*13], xmm13
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*14], xmm14
+    movdqa  [rdx + CPUMCTX.fpu + X86FXSTATE.aXMM + 16*15], xmm15
+
+.sixtyfourbit_done:
+    jmp far [.fpret wrt rip]
+.fpret:                                 ; 16:32 Pointer to .the_end.
+    dd      .done, NAME(SUPR0AbsKernelCS)
+BITS 32
+%endif
+
 ENDPROC CPUMSaveXMM

@@ -248,5 +444,5 @@
 ; @param  u16FCW  x86:[esp+4]  GCC:rdi  MSC:rcx  New FPU control word
 align 16
-BEGINPROC CPUMR0SetFCW
+BEGINPROC cpumR0SetFCW
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS

@@ -263,5 +459,5 @@
     pop     xAX
     ret
-ENDPROC CPUMR0SetFCW
+ENDPROC cpumR0SetFCW
 
 

@@ -270,9 +466,9 @@
 ;
 align 16
-BEGINPROC CPUMR0GetFCW
+BEGINPROC cpumR0GetFCW
     fnstcw  [xSP - 8]
     mov     ax, word [xSP - 8]
     ret
-ENDPROC CPUMR0GetFCW
+ENDPROC cpumR0GetFCW
 
 

@@ -282,5 +478,5 @@
 ; @param  u32MXCSR  x86:[esp+4]  GCC:rdi  MSC:rcx  New MXCSR
 align 16
-BEGINPROC CPUMR0SetMXCSR
+BEGINPROC cpumR0SetMXCSR
 %ifdef RT_ARCH_AMD64
  %ifdef RT_OS_WINDOWS

@@ -296,5 +492,5 @@
     pop     xAX
     ret
-ENDPROC CPUMR0SetMXCSR
+ENDPROC cpumR0SetMXCSR
 
 

@@ -303,7 +499,7 @@
 ;
 align 16
-BEGINPROC CPUMR0GetMXCSR
+BEGINPROC cpumR0GetMXCSR
    stmxcsr [xSP - 8]
     mov     eax, dword [xSP - 8]
     ret
-ENDPROC CPUMR0GetMXCSR
+ENDPROC cpumR0GetMXCSR
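The real substance of this file's diff is the VBOX_WITH_HYBRID_32BIT_KERNEL support. On a host that runs a 32-bit kernel on a 64-bit-capable CPU, each routine now tests g_fCPUMIs64bitHost (which holds the absolute value of SUPR0AbsIs64bit) and, when set, executes a hand-encoded far jump: the 0xea byte is the far-jmp opcode, and the dd pair that follows supplies the 32-bit offset of .sixtyfourbit_mode plus the 64-bit kernel code selector. The fxsave/fxrstor then runs in long mode, where all sixteen XMM registers are reachable, and control returns to 32-bit code via 'jmp far [.fpret wrt rip]'. The label renames ride along: NASM labels starting with a dot are local to the preceding global label, so every procedure can have its own .done and .fpu_not_used instead of unique globals like gth_fpu_no and CPUMLoadXMM_done. The guarded save/restore logic itself is unchanged; here is a loose C rendering of cpumR0SaveGuestRestoreHostFPUState's control flow (stub types and an illustrative flag value, not VBox code):

    #include <stdint.h>
    #include <stdio.h>

    #define CPUM_USED_FPU UINT32_C(0x0001)   /* illustrative flag value */

    /* Cut-down stand-ins for the 512-byte fxsave image and CPUMCPU state. */
    typedef struct { uint8_t abFpuState[512]; } FXSTATESKETCH;
    typedef struct {
        uint32_t      fUseFlags;
        FXSTATESKETCH Guest;
        FXSTATESKETCH Host;
    } CPUMCPUSKETCH;

    static void fxsaveStub(FXSTATESKETCH *p)  { (void)p; puts("fxsave  guest"); }
    static void fxrstorStub(FXSTATESKETCH *p) { (void)p; puts("fxrstor host"); }

    /* Mirrors the assembly: do nothing unless the guest actually used the
       FPU; otherwise save guest state, reload host state, clear the flag.
       (The assembly additionally clears the FPU-trapping CR0 bits around
       the two instructions and restores CR0 afterwards, and on a hybrid
       32-bit kernel it far-jumps into a 64-bit code segment first.) */
    static int saveGuestRestoreHostFPUState(CPUMCPUSKETCH *pCpumCpu)
    {
        if (pCpumCpu->fUseFlags & CPUM_USED_FPU) /* test ...; jz .fpu_not_used */
        {
            fxsaveStub(&pCpumCpu->Guest);
            fxrstorStub(&pCpumCpu->Host);
            pCpumCpu->fUseFlags &= ~CPUM_USED_FPU;
        }
        return 0;                                /* xor eax, eax -> VINF_SUCCESS */
    }

    int main(void)
    {
        CPUMCPUSKETCH Cpu = { CPUM_USED_FPU, {{0}}, {{0}} };
        saveGuestRestoreHostFPUState(&Cpu);
        printf("fUseFlags now %#x\n", Cpu.fUseFlags);
        return 0;
    }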