Changeset 52050 in vbox
- Timestamp: 2014-07-16 01:53:24 PM (10 years ago)
- Location: trunk
- Files:
  - 6 edited
  - 1 copied
Legend:
- Unmodified
- Added
- Removed
trunk/include/iprt/err.h
r51862 → r52050

    /** An account is restricted in a certain way. */
    #define VINF_ACCOUNT_RESTRICTED                 22405
+   /** Not able to satisfy all the requirements of the request. */
+   #define VERR_UNABLE_TO_SATISFY_REQUIREMENTS     (-22406)
+   /** Not able to satisfy all the requirements of the request. */
+   #define VWRN_UNABLE_TO_SATISFY_REQUIREMENTS     22406
+   /** The requested allocation is too big. */
+   #define VERR_ALLOCATION_TOO_BIG                 (-22405)
    /** @} */
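The VERR_/VWRN_ pair follows the usual IPRT status-code convention: the negative VERR_ value is a hard failure, while the positive VWRN_ twin still counts as success for RT_SUCCESS(). A minimal caller sketch (hypothetical usage, not part of this changeset; allocSecret is an invented name, and exactly when a backend returns the warning is backend-specific):

    #include <iprt/err.h>
    #include <iprt/memsafer.h>

    static int allocSecret(void **ppv, size_t cb)
    {
        int rc = RTMemSaferAllocZEx(ppv, cb, RTMEMSAFER_F_REQUIRE_NOT_PAGABLE);
        if (RT_FAILURE(rc))
            return rc;              /* hard failure, e.g. VERR_NO_MEMORY */
        /* rc can be VWRN_UNABLE_TO_SATISFY_REQUIREMENTS (positive, i.e. a
           success status) when not every requirement could be honoured. */
        return rc;
    }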
trunk/include/iprt/memsafer.h
r52018 → r52050

     */

-   /** Default memory allocation, non-pageable memory backing, return error
-    * if not possible to allocate such memor. */
-   #define RTMEMSAFER_ALLOC_EX_FLAGS_DEFAULT           (0)
-   /** Allow pageable memory backing for cases where the content is not that sensitive
-    * and allocating non-pageable memory failes. Use with care! */
-   #define RTMEMSAFER_ALLOC_EX_ALLOW_PAGEABLE_BACKING  RT_BIT_32(1)
+   /** @name RTMEMSAFER_F_XXX
+    * @{ */
+   /** Require the memory to not hit the page file.
+    * @remarks Makes no guarantees with regards to hibernation /
+    *          suspend-to-disk. */
+   #define RTMEMSAFER_F_REQUIRE_NOT_PAGABLE    RT_BIT_32(0)
+   /** Mask of valid bits. */
+   #define RTMEMSAFER_F_VALID_MASK             UINT32_C(0x00000001)
+   /** @} */
…
    /**
     * Allocates memory for sensitive data.
…
     * @param   ppvNew      Where to return the pointer to the memory.
     * @param   cb          Number of bytes to allocate.
-    * @param   fFlags      Flags for controlling the allocation. See RTMEMSAFER_ALLOC_EX_FLAGS_* defines.
+    * @param   fFlags      Flags for controlling the allocation, see
+    *                      RTMEMSAFER_F_XXX.
     * @param   pszTag      Allocation tag used for statistics and such.
     */
…
     * @param   a_ppvNew    Where to return the pointer to the memory.
     * @param   a_cb        Number of bytes to allocate.
-    * @param   a_fFlags    Flags for controlling the allocation. See RTMEMSAFER_ALLOC_EX_FLAGS_* defines.
+    * @param   a_fFlags    Flags for controlling the allocation, see
+    *                      RTMEMSAFER_F_XXX.
     */
    #define RTMemSaferAllocZEx(a_ppvNew, a_cb, a_fFlags) RTMemSaferAllocZExTag(a_ppvNew, a_cb, a_fFlags, RTMEM_TAG)
…
     * @param   cbNew       The size of the new allocation.
     * @param   ppvNew      Where to return the pointer to the new memory.
-    * @param   fFlags      Flags for controlling the allocation. See RTMEMSAFER_ALLOC_EX_FLAGS_* defines,
-    *                      this takes only effect when allocating completely new memory, for extending or
-    *                      shrinking existing allocations the flags of the allocation take precedence.
+    * @param   fFlags      Flags for controlling the allocation, see
+    *                      RTMEMSAFER_F_XXX.  It is not permitted to drop safety
+    *                      requirements after the initial allocation.
     * @param   pszTag      Allocation tag used for statistics and such.
     */
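A short usage sketch of the reworked flag API (an assumed typical call pattern, not code from this changeset):

    #include <iprt/memsafer.h>

    static void useKeyBuffer(void)
    {
        void *pvKey = NULL;
        /* fFlags == 0: a pageable fallback is acceptable. */
        int rc = RTMemSaferAllocZEx(&pvKey, 256, 0 /*fFlags*/);
        if (RT_SUCCESS(rc))
        {
            /* ... place key material in the 256 zeroed bytes ... */
            RTMemSaferFree(pvKey, 256); /* the size must match the allocation */
        }
    }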
trunk/src/VBox/Runtime/Makefile.kmk
r52021 → r52050

        generic/RTTimerLRCreate-generic.cpp \
        generic/mempool-generic.cpp \
-       generic/memsafer-generic.cpp \
        generic/semfastmutex-generic.cpp \
        generic/semxroads-generic.cpp \
…
        r3/init.cpp \
        r3/isofs.cpp \
+       r3/memsafer-r3.cpp \
        r3/path.cpp \
        r3/poll.cpp \
…
    RuntimeBldProg_BLD_TRG_ARCH := $(KBUILD_HOST_ARCH)
    RuntimeBldProg_BLD_TRG_CPU  := $(KBUILD_HOST_CPU)
-   RuntimeBldProg_DEFS         := $(RuntimeR3_DEFS) IPRT_WITHOUT_LDR_VERIFY
+   RuntimeBldProg_DEFS         := $(filter-out IN_SUP_R3 IN_SUP, $(RuntimeR3_DEFS)) IPRT_WITHOUT_LDR_VERIFY RT_NO_GIP
    RuntimeBldProg_SOURCES       = $(filter-out \
        r3/xml.cpp \
trunk/src/VBox/Runtime/common/math/bignum.cpp
r52018 → r52050

    Assert(cbNew > cbOld);

-   void *pvNew = NULL;
+   void *pvNew;
    if (pBigNum->fSensitive)
-   {
-       int rc = RTMemSaferReallocZEx(cbOld, pBigNum->pauElements, cbNew, &pvNew, RTMEMSAFER_ALLOC_EX_ALLOW_PAGEABLE_BACKING);
-       Assert(VALID_PTR(pvNew) || RT_FAILURE(rc));
-   }
+       pvNew = RTMemSaferReallocZ(cbOld, pBigNum->pauElements, cbNew);
    else
        pvNew = RTMemRealloc(pBigNum->pauElements, cbNew);
…
    pBigNum->cAllocated = RT_ALIGN_32(pBigNum->cUsed, 4);
    if (pBigNum->fSensitive)
-   {
-       int rc = RTMemSaferAllocZEx((void **)&pBigNum->pauElements, pBigNum->cAllocated * RTBIGNUM_ELEMENT_SIZE,
-                                   RTMEMSAFER_ALLOC_EX_ALLOW_PAGEABLE_BACKING);
-       Assert(VALID_PTR(pBigNum->pauElements) || RT_FAILURE(rc));
-   }
+       pBigNum->pauElements = (RTBIGNUMELEMENT *)RTMemSaferAllocZ(pBigNum->cAllocated * RTBIGNUM_ELEMENT_SIZE);
    else
        pBigNum->pauElements = (RTBIGNUMELEMENT *)RTMemAlloc(pBigNum->cAllocated * RTBIGNUM_ELEMENT_SIZE);
…
    pBigNum->cAllocated = RT_ALIGN_32(pBigNum->cUsed, 4);
    if (pBigNum->fSensitive)
-   {
-       rc = RTMemSaferAllocZEx((void **)&pBigNum->pauElements, pBigNum->cAllocated * RTBIGNUM_ELEMENT_SIZE,
-                               RTMEMSAFER_ALLOC_EX_ALLOW_PAGEABLE_BACKING);
-       Assert(VALID_PTR(pBigNum->pauElements) || RT_FAILURE(rc));
-   }
+       pBigNum->pauElements = (RTBIGNUMELEMENT *)RTMemSaferAllocZ(pBigNum->cAllocated * RTBIGNUM_ELEMENT_SIZE);
    else
        pBigNum->pauElements = (RTBIGNUMELEMENT *)RTMemAlloc(pBigNum->cAllocated * RTBIGNUM_ELEMENT_SIZE);
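The pointer-returning Z wrappers call the Ex variant with fFlags = 0 and fold the status code into the pointer, which is why the status asserts at these call sites became redundant. A sketch of the wrapper pattern (mirroring RTMemSaferAllocZTag as shown in the memsafer sources below; mySaferAllocZ is an illustrative name):

    #include <iprt/mem.h>      /* RTMEM_TAG */
    #include <iprt/memsafer.h>

    static void *mySaferAllocZ(size_t cb)
    {
        void *pv = NULL;
        int rc = RTMemSaferAllocZExTag(&pv, cb, 0 /*fFlags*/, RTMEM_TAG);
        return RT_SUCCESS(rc) ? pv : NULL;  /* NULL on any failure */
    }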
trunk/src/VBox/Runtime/generic/memsafer-generic.cpp
r52033 → r52050

    #include <iprt/assert.h>
    #include <iprt/string.h>
-   #if defined(IN_SUP_R3) && defined(VBOX) && !defined(RT_NO_GIP)
-   # include <iprt/param.h>
-   # include <VBox/sup.h>
-   #endif /* IN_SUP_R3 */
…
    #define RTMEMSAFER_PAD_AFTER    32

-   /*******************************************************************************
-   *   Structures and Typedefs                                                    *
-   *******************************************************************************/
-   /**
-    * Supported allocation methods.
-    */
-   typedef enum RTMEMSAFERALLOCMETHOD
-   {
-       /** Invalid method. */
-       RTMEMSAFERALLOCMETHOD_INVALID = 0,
-       /** RTMem{Alloc|Free} methods, least secure! */
-       RTMEMSAFERALLOCMETHOD_RTMEM,
-       /** Support library. */
-       RTMEMSAFERALLOCMETHOD_SUPR3,
-       /** 32bit hack. */
-       RTMEMSAFERALLOCMETHOD_32BIT_HACK = 0x7fffffff
-   } RTMEMSAFERALLOCMETHOD;
-   /** Pointer to an allocation method enum. */
-   typedef RTMEMSAFERALLOCMETHOD *PRTMEMSAFERALLOCMETHOD;
-
-   /**
-    * Memory header for safer memory allocations.
-    *
-    * @note There is deliberately no magic value, to make identifying this
-    *       structure as hard as possible.
-    */
-   typedef struct RTMEMSAFERHDR
-   {
-       /** Flags passed to this allocation - used for freeing and reallocation. */
-       uint32_t              fFlags;
-       /** Allocation method used. */
-       RTMEMSAFERALLOCMETHOD enmAllocMethod;
-       /** Amount of bytes allocated. */
-       size_t                cb;
-   } RTMEMSAFERHDR;
-   /** Pointer to a safer memory header. */
-   typedef RTMEMSAFERHDR *PRTMEMSAFERHDR;
-   /** Make sure we are staying in the padding area. */
-   AssertCompile(sizeof(RTMEMSAFERHDR) < RTMEMSAFER_PAD_BEFORE);

    /*******************************************************************************
    *   Global Variables                                                           *
    *******************************************************************************/
    /** XOR scrambler value.
     * @todo determine this at runtime */
…
-   … rtMemSaferSupR3Alloc() and rtMemSafeSupR3Free(), the SUPR3 guard-page
-   allocator helpers, are removed (a reworked version lives in
-   r3/memsafer-r3.cpp below) …

    RTDECL(int) RTMemSaferScramble(void *pv, size_t cb)
    {
-       PRTMEMSAFERHDR pHdr = (PRTMEMSAFERHDR)((char *)pv - RTMEMSAFER_PAD_BEFORE);
-       AssertMsg(pHdr->cb == cb, ("pHdr->cb=%#zx cb=%#zx\n", pHdr->cb, cb));
+       AssertMsg(*(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE) == cb,
+                 ("*pvStart=%#zx cb=%#zx\n", *(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE), cb));

        /* Note! This isn't supposed to be safe, just less obvious. */
…
    RTDECL(int) RTMemSaferUnscramble(void *pv, size_t cb)
    {
-       PRTMEMSAFERHDR pHdr = (PRTMEMSAFERHDR)((char *)pv - RTMEMSAFER_PAD_BEFORE);
-       AssertMsg(pHdr->cb == cb, ("pHdr->cb=%#zx cb=%#zx\n", pHdr->cb, cb));
+       AssertMsg(*(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE) == cb,
+                 ("*pvStart=%#zx cb=%#zx\n", *(size_t *)((char *)pv - RTMEMSAFER_PAD_BEFORE), cb));

        /* Note! This isn't supposed to be safe, just less obvious. */
…
    RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW
    {
-       AssertReturn(cb, VERR_INVALID_PARAMETER);
        AssertPtrReturn(ppvNew, VERR_INVALID_PARAMETER);
        *ppvNew = NULL;
+       AssertReturn(cb, VERR_INVALID_PARAMETER);

        /*
-        * Don't request zeroed memory. We want random heap garbage in the
-        * padding zones, nothing that makes our allocations easier to find.
+        * We support none of the hard requirements passed thru flags.
         */
-       RTMEMSAFERALLOCMETHOD enmAllocMethod = RTMEMSAFERALLOCMETHOD_SUPR3;
-       size_t cbUser = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
-       void *pvNew = NULL;
-       int rc = rtMemSaferSupR3Alloc(&pvNew, cbUser + RTMEMSAFER_PAD_BEFORE + RTMEMSAFER_PAD_AFTER);
-       if (   RT_FAILURE(rc)
-           && fFlags & RTMEMSAFER_ALLOC_EX_ALLOW_PAGEABLE_BACKING)
-       {
-           /* Pageable memory allowed. */
-           enmAllocMethod = RTMEMSAFERALLOCMETHOD_RTMEM;
-           pvNew = RTMemAlloc(cbUser + RTMEMSAFER_PAD_BEFORE + RTMEMSAFER_PAD_AFTER);
-       }
-
-       if (pvNew)
-       {
-           PRTMEMSAFERHDR pHdr = (PRTMEMSAFERHDR)pvNew;
-           pHdr->fFlags = fFlags;
-           pHdr->cb = cb;
-           pHdr->enmAllocMethod = enmAllocMethod;
-   #ifdef RT_STRICT /* For checking input in strict builds. */
-           memset((char *)pvNew + sizeof(RTMEMSAFERHDR), 0xad, RTMEMSAFER_PAD_BEFORE - sizeof(RTMEMSAFERHDR));
-           memset((char *)pvNew + RTMEMSAFER_PAD_BEFORE + cb, 0xda, RTMEMSAFER_PAD_AFTER + (cbUser - cb));
-   #endif
-
-           void *pvUser = (char *)pvNew + RTMEMSAFER_PAD_BEFORE;
-           *ppvNew = pvUser;
-
-           /* You don't use this API for performance, so we always clean memory. */
-           RT_BZERO(pvUser, cb);
-
-           return VINF_SUCCESS;
-       }
-       return rc;
+       if (fFlags == 0)
+       {
+           /*
+            * Don't request zeroed memory. We want random heap garbage in the
+            * padding zones, nothing that makes our allocations easier to find.
+            */
+           size_t cbUser = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
+           void *pvNew = RTMemAlloc(cbUser + RTMEMSAFER_PAD_BEFORE + RTMEMSAFER_PAD_AFTER);
+           if (pvNew)
+           {
+   #ifdef RT_STRICT /* For checking input in strict builds. */
+               memset(pvNew, 0xad, RTMEMSAFER_PAD_BEFORE);
+               memset((char *)pvNew + RTMEMSAFER_PAD_BEFORE + cb, 0xda, RTMEMSAFER_PAD_AFTER + (cbUser - cb));
+               *(size_t *)pvNew = cb;
+   #endif
+
+               void *pvUser = (char *)pvNew + RTMEMSAFER_PAD_BEFORE;
+               *ppvNew = pvUser;
+
+               /* You don't use this API for performance, so we always clean memory. */
+               RT_BZERO(pvUser, cb);
+
+               return VINF_SUCCESS;
+           }
+           return VERR_NO_MEMORY;
+       }
+       AssertReturn(!(fFlags & ~RTMEMSAFER_F_VALID_MASK), VERR_INVALID_FLAGS);
+       return VWRN_UNABLE_TO_SATISFY_REQUIREMENTS;
    }
    RT_EXPORT_SYMBOL(RTMemSaferAllocZExTag);
…
    RTDECL(void) RTMemSaferFree(void *pv, size_t cb) RT_NO_THROW
    {
        if (pv)
        {
            Assert(cb);
-           size_t cbUser = RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN);
            void *pvStart = (char *)pv - RTMEMSAFER_PAD_BEFORE;
-           PRTMEMSAFERHDR pHdr = (PRTMEMSAFERHDR)pvStart;
-           AssertMsg(pHdr->cb == cb, ("pHdr->cb=%#zx cb=%#zx\n", pHdr->cb, cb));
-
+           AssertMsg(*(size_t *)pvStart == cb, ("*pvStart=%#zx cb=%#zx\n", *(size_t *)pvStart, cb));
            RTMemWipeThoroughly(pv, RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN), 3);
-
-           switch (pHdr->enmAllocMethod)
-           {
-               case RTMEMSAFERALLOCMETHOD_SUPR3:
-                   rtMemSafeSupR3Free(pvStart, cbUser + RTMEMSAFER_PAD_BEFORE + RTMEMSAFER_PAD_AFTER);
-                   break;
-               case RTMEMSAFERALLOCMETHOD_RTMEM:
-                   RTMemFree(pvStart);
-                   break;
-               default:
-                   AssertMsgFailed(("Invalid allocation method, corrupted header\n"));
-           }
+           RTMemFree(pvStart);
        }
        else
…
        if (cbNew && cbOld)
        {
-           PRTMEMSAFERHDR pHdr = (PRTMEMSAFERHDR)((char *)pvOld - RTMEMSAFER_PAD_BEFORE);
            AssertPtr(pvOld);
            AssertMsg(*(size_t *)((char *)pvOld - RTMEMSAFER_PAD_BEFORE) == cbOld,
                      ("*pvStart=%#zx cbOld=%#zx\n", *(size_t *)((char *)pvOld - RTMEMSAFER_PAD_BEFORE), cbOld));

+           /*
+            * We support none of the hard requirements passed thru flags.
+            */
            void *pvNew;
-           rc = RTMemSaferAllocZExTag(&pvNew, cbNew, pHdr->fFlags, pszTag);
+           rc = RTMemSaferAllocZExTag(&pvNew, cbNew, fFlags, pszTag);
            if (RT_SUCCESS(rc))
…
        void *pvNew = NULL;
-       int rc = RTMemSaferAllocZExTag(&pvNew, cb, RTMEMSAFER_ALLOC_EX_FLAGS_DEFAULT, pszTag);
+       int rc = RTMemSaferAllocZExTag(&pvNew, cb, 0 /*fFlags*/, pszTag);
        if (RT_SUCCESS(rc))
            return pvNew;
…
        void *pvNew = NULL;
-       int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, RTMEMSAFER_ALLOC_EX_FLAGS_DEFAULT, pszTag);
+       int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, 0 /*fFlags*/, pszTag);
        if (RT_SUCCESS(rc))
            return pvNew;
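Scrambling is a plain XOR with a process-global key, so applying it twice restores the buffer. A hedged round-trip sketch (API names from this changeset; buffer size and contents are illustrative):

    #include <iprt/memsafer.h>
    #include <iprt/string.h>

    static void scrambleRoundTrip(void)
    {
        void *pv = RTMemSaferAllocZ(32);
        if (pv)
        {
            memset(pv, 0x5a, 32);
            RTMemSaferScramble(pv, 32);   /* XOR with the scrambler key */
            RTMemSaferUnscramble(pv, 32); /* XOR again: content restored */
            RTMemSaferFree(pv, 32);
        }
    }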
trunk/src/VBox/Runtime/r3/memsafer-r3.cpp
r52033 → r52050 (copied from trunk/src/VBox/Runtime/generic/memsafer-generic.cpp)

    #include <iprt/memsafer.h>

+   #include <iprt/asm.h>
    #include <iprt/assert.h>
+   #include <iprt/avl.h>
+   #include <iprt/critsect.h>
+   #include <iprt/mem.h>
+   #include <iprt/once.h>
+   #include <iprt/rand.h>
+   #include <iprt/param.h>
    #include <iprt/string.h>
-   #if defined(IN_SUP_R3) && defined(VBOX) && !defined(RT_NO_GIP)
-   # include <iprt/param.h>
+   #ifdef IN_SUP_R3
    # include <VBox/sup.h>
-   #endif /* IN_SUP_R3 */
+   #endif
…
-   /** Allocation size alignment. */
+   /** Allocation size alignment (power of two). */
    #define RTMEMSAFER_ALIGN        16
-   /** Padding after the block to avoid small overruns. */
-   #define RTMEMSAFER_PAD_BEFORE   96
-   /** Padding after the block to avoid small underruns. */
-   #define RTMEMSAFER_PAD_AFTER    32
…
-   … the RTMEMSAFERALLOCMETHOD enum and RTMEMSAFERHDR header struct inherited
-   from the generic version are removed (see the generic file above); the new
-   declarations follow …
+   /**
+    * Allocators.
+    */
+   typedef enum RTMEMSAFERALLOCATOR
+   {
+       /** Invalid method. */
+       RTMEMSAFERALLOCATOR_INVALID = 0,
+       /** RTMemPageAlloc. */
+       RTMEMSAFERALLOCATOR_RTMEMPAGE,
+       /** SUPR3PageAllocEx. */
+       RTMEMSAFERALLOCATOR_SUPR3
+   } RTMEMSAFERALLOCATOR;
+
+   /**
+    * Tracking node (lives on normal heap).
+    */
+   typedef struct RTMEMSAFERNODE
+   {
+       /** Node core.
+        * The core key is a scrambled pointer to the user memory. */
+       AVLPVNODECORE           Core;
+       /** The allocation flags. */
+       uint32_t                fFlags;
+       /** The offset into the allocation of the user memory. */
+       uint32_t                offUser;
+       /** The requested allocation size. */
+       size_t                  cbUser;
+       /** The allocation size in pages, this includes the two guard pages. */
+       uint32_t                cPages;
+       /** The allocator used for this node. */
+       RTMEMSAFERALLOCATOR     enmAllocator;
+   } RTMEMSAFERNODE;
+   /** Pointer to an allocation tracking node. */
+   typedef RTMEMSAFERNODE *PRTMEMSAFERNODE;
…
-   /** XOR scrambler value.
-    * @todo determine this at runtime */
-   #if ARCH_BITS == 32
-   static uintptr_t g_uScramblerXor = UINT32_C(0x867af88d);
-   #elif ARCH_BITS == 64
-   static uintptr_t g_uScramblerXor = UINT64_C(0xed95ecc99416d312);
-   #else
-   # error "Bad ARCH_BITS value"
-   #endif
+   /** Init once structure for this module. */
+   static RTONCE       g_MemSaferOnce = RTONCE_INITIALIZER;
+   /** Critical section protecting the allocation tree. */
+   static RTCRITSECTRW g_MemSaferCritSect;
+   /** Tree of allocation nodes. */
+   static AVLPVTREE    g_pMemSaferTree;
+   /** XOR scrambler value for memory. */
+   static uintptr_t    g_uMemSaferScramblerXor;
+   /** XOR scrambler value for pointers. */
+   static uintptr_t    g_uMemSaferPtrScramblerXor;
+   /** Pointer rotate shift count. */
+   static uintptr_t    g_cMemSaferPtrScramblerRotate;
+
+   /**
+    * @callback_method_impl{FNRTONCE, Inits globals.}
+    */
+   static DECLCALLBACK(int32_t) rtMemSaferOnceInit(void *pvUserIgnore)
+   {
+       g_uMemSaferScramblerXor       = (uintptr_t)RTRandU64();
+       g_uMemSaferPtrScramblerXor    = (uintptr_t)RTRandU64();
+       g_cMemSaferPtrScramblerRotate = RTRandU32Ex(0, ARCH_BITS - 1);
+       return RTCritSectRwInit(&g_MemSaferCritSect);
+   }
+
+   /**
+    * @callback_method_impl{PFNRTONCECLEANUP, Cleans up globals.}
+    */
+   static DECLCALLBACK(void) rtMemSaferOnceTerm(void *pvUser, bool fLazyCleanUpOk)
+   {
+       if (!fLazyCleanUpOk)
+       {
+           RTCritSectRwDelete(&g_MemSaferCritSect);
+           Assert(!g_pMemSaferTree);
+       }
+   }
+
+   DECLINLINE(void *) rtMemSaferScramblePointer(void *pvUser)
+   {
+       uintptr_t uPtr = (uintptr_t)pvUser;
+       uPtr ^= g_uMemSaferPtrScramblerXor;
+   #if ARCH_BITS == 64
+       uPtr = ASMRotateRightU64(uPtr, g_cMemSaferPtrScramblerRotate);
+   #elif ARCH_BITS == 32
+       uPtr = ASMRotateRightU32(uPtr, g_cMemSaferPtrScramblerRotate);
+   #else
+   # error "Unsupported/missing ARCH_BITS."
+   #endif
+       return (void *)uPtr;
+   }
+
+   /**
+    * Inserts a tracking node into the tree.
+    *
+    * @param   pThis   The allocation tracking node to insert.
+    */
+   static void rtMemSaferNodeInsert(PRTMEMSAFERNODE pThis)
+   {
+       RTCritSectRwEnterExcl(&g_MemSaferCritSect);
+       pThis->Core.Key = rtMemSaferScramblePointer(pThis->Core.Key);
+       bool fRc = RTAvlPVInsert(&g_pMemSaferTree, &pThis->Core);
+       RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
+       Assert(fRc);
+   }
+
+   /**
+    * Finds a tracking node in the tree.
+    *
+    * @returns The allocation tracking node for @a pvUser. NULL if not found.
+    * @param   pvUser  The user pointer to the allocation.
+    */
+   static PRTMEMSAFERNODE rtMemSaferNodeLookup(void *pvUser)
+   {
+       void *pvKey = rtMemSaferScramblePointer(pvUser);
+       RTCritSectRwEnterShared(&g_MemSaferCritSect);
+       PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVGet(&g_pMemSaferTree, pvKey);
+       RTCritSectRwLeaveShared(&g_MemSaferCritSect);
+       return pThis;
+   }
+
+   /**
+    * Removes a tracking node from the tree.
+    *
+    * @returns The allocation tracking node for @a pvUser. NULL if not found.
+    * @param   pvUser  The user pointer to the allocation.
+    */
+   static PRTMEMSAFERNODE rtMemSaferNodeRemove(void *pvUser)
+   {
+       void *pvKey = rtMemSaferScramblePointer(pvUser);
+       RTCritSectRwEnterExcl(&g_MemSaferCritSect);
+       PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTAvlPVRemove(&g_pMemSaferTree, pvKey);
+       RTCritSectRwLeaveExcl(&g_MemSaferCritSect);
+       return pThis;
+   }
…
-   … rtMemSaferSupR3Alloc() and rtMemSafeSupR3Free() from the generic version
-   are removed; reworked page allocators appear further down …

    RTDECL(int) RTMemSaferScramble(void *pv, size_t cb)
    {
-       PRTMEMSAFERHDR pHdr = (PRTMEMSAFERHDR)((char *)pv - RTMEMSAFER_PAD_BEFORE);
-       AssertMsg(pHdr->cb == cb, ("pHdr->cb=%#zx cb=%#zx\n", pHdr->cb, cb));
+   #ifdef RT_STRICT
+       PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pv);
+       AssertReturn(pThis, VERR_INVALID_POINTER);
+       AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);
+   #endif

        /* Note! This isn't supposed to be safe, just less obvious. */
        uintptr_t *pu = (uintptr_t *)pv;
        while (cb > 0)
        {
-           *pu ^= g_uScramblerXor;
+           *pu ^= g_uMemSaferScramblerXor;
            pu++;
            cb -= sizeof(*pu);
…
    (RTMemSaferUnscramble gets the same strict node lookup and the renamed
    scrambler global.)
…
+   /**
+    * Initializes the pages.
+    *
+    * Fills the memory with random bytes in order to make it less obvious
+    * where the secret data starts and ends.  We also zero the user memory in
+    * case the allocator does not do this.
+    *
+    * @param   pThis    The allocation tracker node.  The Core.Key member
+    *                   will be set.
+    * @param   pvPages  The pages to initialize.
+    */
+   static void rtMemSaferInitializePages(PRTMEMSAFERNODE pThis, void *pvPages)
+   {
+       RTRandBytes(pvPages, PAGE_SIZE + pThis->offUser);
+
+       uint8_t *pbUser = (uint8_t *)pvPages + PAGE_SIZE + pThis->offUser;
+       pThis->Core.Key = pbUser;
+       RT_BZERO(pbUser, pThis->cbUser); /* paranoia */
+
+       RTRandBytes(pbUser + pThis->cbUser,
+                   (size_t)pThis->cPages * PAGE_SIZE - PAGE_SIZE - pThis->offUser - pThis->cbUser);
+   }
+
+   /**
+    * Allocates and initializes pages from the support driver.
+    *
+    * @returns VBox status code.
+    * @param   pThis   The allocator node.  Core.Key will be set on successful
+    *                  return (unscrambled).
+    */
+   static int rtMemSaferSupR3AllocPages(PRTMEMSAFERNODE pThis)
+   {
+   #ifdef IN_SUP_R3
+       /*
+        * Try allocate the memory.
+        */
+       void *pvPages;
+       int rc = SUPR3PageAllocEx(pThis->cPages, 0 /* fFlags */, &pvPages, NULL /* pR0Ptr */, NULL /* paPages */);
+       if (RT_SUCCESS(rc))
+       {
+           rtMemSaferInitializePages(pThis, pvPages);
+
+           /*
+            * Configure the guard pages.
+            * SUPR3PageProtect isn't supported on all hosts, we ignore that.
+            */
+           rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_NONE);
+           if (RT_SUCCESS(rc))
+           {
+               rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, (pThis->cPages - PAGE_SIZE) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
+               if (RT_SUCCESS(rc))
+                   return VINF_SUCCESS;
+               SUPR3PageProtect(pvPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+           }
+           else if (rc == VERR_NOT_SUPPORTED)
+               return VINF_SUCCESS;
+
+           /* failed. */
+           int rc2 = SUPR3PageFreeEx(pvPages, pThis->cPages); AssertRC(rc2);
+       }
+       return rc;
+   #else  /* !IN_SUP_R3 */
+       return VERR_NOT_SUPPORTED;
+   #endif /* !IN_SUP_R3 */
+   }
+
+   /**
+    * Allocates and initializes pages using the IPRT page allocator API.
+    *
+    * @returns VBox status code.
+    * @param   pThis   The allocator node.  Core.Key will be set on successful
+    *                  return (unscrambled).
+    */
+   static int rtMemSaferMemAllocPages(PRTMEMSAFERNODE pThis)
+   {
+       /*
+        * Try allocate the memory.
+        */
+       int rc;
+       void *pvPages = RTMemPageAlloc((size_t)pThis->cPages * PAGE_SIZE);
+       if (pvPages)
+       {
+           rtMemSaferInitializePages(pThis, pvPages);
+
+           /*
+            * Configure the guard pages.
+            */
+           rc = RTMemProtect(pvPages, PAGE_SIZE, RTMEM_PROT_NONE);
+           if (RT_SUCCESS(rc))
+           {
+               rc = RTMemProtect((uint8_t *)pvPages + (size_t)(pThis->cPages - 1U) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
+               if (RT_SUCCESS(rc))
+                   return VINF_SUCCESS;
+               rc = RTMemProtect(pvPages, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+           }
+
+           /* failed. */
+           RTMemPageFree(pvPages, (size_t)pThis->cPages * PAGE_SIZE);
+       }
+
+       return rc;
+   }

    RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW
    {
+       /*
+        * Validate input.
+        */
        AssertPtrReturn(ppvNew, VERR_INVALID_PARAMETER);
        *ppvNew = NULL;
        AssertReturn(cb, VERR_INVALID_PARAMETER);
+       AssertReturn(cb <= 32U*_1M - PAGE_SIZE * 3U, VERR_ALLOCATION_TOO_BIG); /* Max 32 MB minus padding and guard pages. */
+       AssertReturn(!(fFlags & ~RTMEMSAFER_F_VALID_MASK), VERR_INVALID_FLAGS);

-       … the old padding/header based allocation is removed …
+       /*
+        * Initialize globals.
+        */
+       int rc = RTOnceEx(&g_MemSaferOnce, rtMemSaferOnceInit, rtMemSaferOnceTerm, NULL);
+       if (RT_SUCCESS(rc))
+       {
+           /*
+            * Allocate a tracker node first.
+            */
+           PRTMEMSAFERNODE pThis = (PRTMEMSAFERNODE)RTMemAllocZ(sizeof(RTMEMSAFERNODE));
+           if (pThis)
+           {
+               /*
+                * Prepare the allocation.
+                */
+               pThis->cbUser  = cb;
+               pThis->offUser = (RTRandU32Ex(0, 128) * RTMEMSAFER_ALIGN) & PAGE_OFFSET_MASK;
+
+               size_t cbNeeded = pThis->offUser + pThis->cbUser;
+               cbNeeded = RT_ALIGN_Z(cbNeeded, PAGE_SIZE);
+
+               pThis->cPages = (uint32_t)(cbNeeded / PAGE_SIZE) + 2; /* +2 for guard pages */
+
+               /*
+                * Try allocate the memory, using the best allocator by default
+                * and falling back on the less safe one.
+                */
+               rc = rtMemSaferSupR3AllocPages(pThis);
+               if (RT_SUCCESS(rc))
+                   pThis->enmAllocator = RTMEMSAFERALLOCATOR_SUPR3;
+               else if (!(fFlags & RTMEMSAFER_F_REQUIRE_NOT_PAGABLE))
+               {
+                   rc = rtMemSaferMemAllocPages(pThis);
+                   if (RT_SUCCESS(rc))
+                       pThis->enmAllocator = RTMEMSAFERALLOCATOR_RTMEMPAGE;
+               }
+               if (RT_SUCCESS(rc))
+               {
+                   /*
+                    * Insert the node.
+                    */
+                   *ppvNew = pThis->Core.Key;
+                   rtMemSaferNodeInsert(pThis); /* (Scrambles Core.Key.) */
+                   return VINF_SUCCESS;
+               }
+
+               RTMemFree(pThis);
+           }
+           else
+               rc = VERR_NO_MEMORY;
+       }
+       return rc;
    }
    RT_EXPORT_SYMBOL(RTMemSaferAllocZExTag);

    RTDECL(void) RTMemSaferFree(void *pv, size_t cb) RT_NO_THROW
    {
        if (pv)
        {
-           … the old header based free is removed …
+           PRTMEMSAFERNODE pThis = rtMemSaferNodeRemove(pv);
+           AssertReturnVoid(pThis);
+           AssertMsg(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser));
+
+           /*
+            * Wipe the user memory first.
+            */
            RTMemWipeThoroughly(pv, RT_ALIGN_Z(cb, RTMEMSAFER_ALIGN), 3);
+
+           /*
+            * Free the pages.
+            */
+           uint8_t *pbPages = (uint8_t *)pv - pThis->offUser - PAGE_SIZE;
+           size_t   cbPages = (size_t)pThis->cPages * PAGE_SIZE;
+           switch (pThis->enmAllocator)
+           {
+   #ifdef IN_SUP_R3
+               case RTMEMSAFERALLOCATOR_SUPR3:
+                   SUPR3PageProtect(pbPages, NIL_RTR0PTR, 0, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+                   SUPR3PageProtect(pbPages, NIL_RTR0PTR, (uint32_t)(cbPages - PAGE_SIZE), PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+                   SUPR3PageFreeEx(pbPages, cbPages);
+                   break;
+   #endif
+               case RTMEMSAFERALLOCATOR_RTMEMPAGE:
+                   RTMemProtect(pbPages, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+                   RTMemProtect(pbPages + cbPages - PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
+                   RTMemPageFree(pbPages, cbPages);
+                   break;
+
+               default:
+                   AssertFailed();
+           }
+
+           /*
+            * Free the tracking node.
+            */
+           pThis->Core.Key = NULL;
+           pThis->offUser  = 0;
+           pThis->cbUser   = 0;
+           RTMemFree(pThis);
        }
        else
…
+   /**
+    * The simplest reallocation method: allocate new block, copy over the
+    * data, free old block.
+    */
+   static int rtMemSaferReallocSimpler(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag)
+   {
+       void *pvNew;
+       int rc = RTMemSaferAllocZExTag(&pvNew, cbNew, fFlags, pszTag);
+       if (RT_SUCCESS(rc))
+       {
+           memcpy(pvNew, pvOld, RT_MIN(cbNew, cbOld));
+           RTMemSaferFree(pvOld, cbOld);
+           *ppvNew = pvNew;
+       }
+       return rc;
+   }

    RTDECL(int) RTMemSaferReallocZExTag(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag) RT_NO_THROW
    {
        int rc;
        /* Real realloc. */
        if (cbNew && cbOld)
        {
-           … the old copy-always implementation is removed …
+           PRTMEMSAFERNODE pThis = rtMemSaferNodeLookup(pvOld);
+           AssertReturn(pThis, VERR_INVALID_POINTER);
+           AssertMsgStmt(cbOld == pThis->cbUser, ("cbOld=%#zx != %#zx\n", cbOld, pThis->cbUser), cbOld = pThis->cbUser);
+
+           if (pThis->fFlags == fFlags)
+           {
+               if (cbNew > cbOld)
+               {
+                   /*
+                    * Is there enough room for us to grow?
+                    */
+                   size_t cbMax = (size_t)(pThis->cPages - 2) * PAGE_SIZE;
+                   if (cbNew <= cbMax)
+                   {
+                       size_t const cbAdded = (cbNew - cbOld);
+                       size_t const cbAfter = cbMax - pThis->offUser - cbOld;
+                       if (cbAfter >= cbAdded)
+                       {
+                           /*
+                            * Sufficient space after the current allocation.
+                            */
+                           uint8_t *pbNewSpace = (uint8_t *)pvOld + cbOld;
+                           RT_BZERO(pbNewSpace, cbAdded);
+                           *ppvNew = pvOld;
+                       }
+                       else
+                       {
+                           /*
+                            * Have to move the allocation to make enough room
+                            * at the end.  In order to make it a little less
+                            * predictable and maybe avoid a relocation or two
+                            * in the next call, divide the page offset until
+                            * it fits.
+                            */
+                           AssertReturn(rtMemSaferNodeRemove(pvOld) == pThis, VERR_INTERNAL_ERROR_3);
+                           uint32_t offNewUser = pThis->offUser;
+                           do
+                               offNewUser = offNewUser / 2;
+                           while ((pThis->offUser - offNewUser) + cbAfter < cbAdded);
+                           offNewUser &= ~(RTMEMSAFER_ALIGN - 1U);
+
+                           uint32_t const cbMove = pThis->offUser - offNewUser;
+                           uint8_t *pbNew = (uint8_t *)pvOld - cbMove;
+                           memmove(pbNew, pvOld, cbOld);
+
+                           RT_BZERO(pbNew + cbOld, cbAdded);
+                           if (cbMove > cbAdded)
+                               RTMemWipeThoroughly(pbNew + cbNew, cbMove - cbAdded, 3);
+
+                           pThis->offUser  = offNewUser;
+                           pThis->Core.Key = pbNew;
+                           *ppvNew = pbNew;
+
+                           rtMemSaferNodeInsert(pThis);
+                       }
+                       Assert(((uintptr_t)*ppvNew & PAGE_OFFSET_MASK) == pThis->offUser);
+                       pThis->cbUser = cbNew;
+                       rc = VINF_SUCCESS;
+                   }
+                   else
+                   {
+                       /*
+                        * Not enough space, allocate a new block and copy over the data.
+                        */
+                       rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
+                   }
+               }
+               else
+               {
+                   /*
+                    * Shrinking the allocation, just wipe the memory that is
+                    * no longer being used.
+                    */
+                   if (cbNew != cbOld)
+                   {
+                       uint8_t *pbAbandond = (uint8_t *)pvOld + cbNew;
+                       RTMemWipeThoroughly(pbAbandond, cbOld - cbNew, 3);
+                   }
+                   pThis->cbUser = cbNew;
+                   *ppvNew = pvOld;
+                   rc = VINF_SUCCESS;
+               }
+           }
+           else if (!pThis->fFlags)
+           {
+               /*
+                * New flags added.  Allocate a new block and copy over the old one.
+                */
+               rc = rtMemSaferReallocSimpler(cbOld, pvOld, cbNew, ppvNew, fFlags, pszTag);
+           }
+           else
+           {
+               /* Incompatible flags. */
+               AssertMsgFailed(("fFlags=%#x old=%#x\n", fFlags, pThis->fFlags));
+               rc = VERR_INVALID_FLAGS;
+           }
        }
-       /* First allocation. */
+       /*
+        * First allocation.  Pass it on.
+        */
        else if (!cbOld)
        {
            Assert(pvOld == NULL);
            rc = RTMemSaferAllocZExTag(ppvNew, cbNew, fFlags, pszTag);
        }
-       /* Free operation */
+       /*
+        * Free operation.  Pass it on.
+        */
        else
        {
            RTMemSaferFree(pvOld, cbOld);
+           *ppvNew = NULL;
            rc = VINF_SUCCESS;
        }
…
        void *pvNew = NULL;
-       int rc = RTMemSaferAllocZExTag(&pvNew, cb, RTMEMSAFER_ALLOC_EX_FLAGS_DEFAULT, pszTag);
+       int rc = RTMemSaferAllocZExTag(&pvNew, cb, 0 /*fFlags*/, pszTag);
        if (RT_SUCCESS(rc))
            return pvNew;
…
        void *pvNew = NULL;
-       int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, RTMEMSAFER_ALLOC_EX_FLAGS_DEFAULT, pszTag);
+       int rc = RTMemSaferReallocZExTag(cbOld, pvOld, cbNew, &pvNew, 0 /*fFlags*/, pszTag);
        if (RT_SUCCESS(rc))
            return pvNew;
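The r3 implementation brackets every allocation with two inaccessible guard pages and starts the user block at a random 16-byte-aligned offset into the first data page; the tracking node remembers offUser and cPages so the whole region can be located again at free time. A standalone sketch of the same layout arithmetic (plain C; the PAGE_SIZE value and the fixed offset are illustrative stand-ins for the runtime values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE        4096u
    #define PAGE_OFFSET_MASK (PAGE_SIZE - 1u)
    #define MEM_ALIGN        16u

    int main(void)
    {
        size_t   cbUser   = 100;                                  /* requested size */
        uint32_t offUser  = (37u * MEM_ALIGN) & PAGE_OFFSET_MASK; /* random at runtime */
        size_t   cbNeeded = (offUser + cbUser + PAGE_SIZE - 1) & ~(size_t)PAGE_OFFSET_MASK;
        uint32_t cPages   = (uint32_t)(cbNeeded / PAGE_SIZE) + 2; /* +2 guard pages */

        printf("user data: page 1 offset %#x, %u data page(s), %u pages total\n",
               offUser, cPages - 2, cPages);
        return 0;
    }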
trunk/src/VBox/Runtime/testcase/tstRTMemSafer.cpp
r52018 → r52050

-   #include <iprt/path.h>
+   #include <iprt/memsafer.h>
+
+   #include <iprt/asm.h>
+   #include <iprt/param.h>
    #include <iprt/rand.h>
    #include <iprt/string.h>
-   #include <iprt/stream.h>
-   #include <iprt/initterm.h>
-   #include <iprt/param.h>
-   #include <iprt/memsafer.h>
    #include <iprt/test.h>
+   #ifdef VBOX
+   # include <VBox/sup.h>
+   #endif

-   /*******************************************************************************
-   *   Global Variables                                                           *
-   *******************************************************************************/

    static void doMemSaferScramble(RTTEST hTest, void *pvBuf, size_t cbAlloc)
    {
-       RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "Testing scrambling (%u bytes)...\n", cbAlloc);
-
+       /*
+        * Fill it with random bytes and make a reference copy of them.
+        */
        RTRandBytes(pvBuf, cbAlloc);

        void *pvRef = RTMemDup(pvBuf, cbAlloc);
-       if (!pvRef)
-       {
-           RTTestIFailed("No memory for reference buffer (%z bytes)\n", cbAlloc);
-           return;
-       }
+       RTTESTI_CHECK_RETV(pvRef);

+       /*
+        * Scramble the allocation and check that it no longer matches the
+        * reference bytes.
+        */
        int rc = RTMemSaferScramble(pvBuf, cbAlloc);
        if (RT_SUCCESS(rc))
…
-           /* Test unscrambling. */
+           /*
+            * Check that unscrambling returns the original content.
+            */
            rc = RTMemSaferUnscramble(pvBuf, cbAlloc);
            if (RT_SUCCESS(rc))
…
    static void doMemSaferAllocation(RTTEST hTest)
    {
        size_t cbAlloc = RTRandS32Ex(1, _1M) * sizeof(uint8_t);

-       RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "Testing allocation of secure memory (%u bytes)...\n", cbAlloc);
        void *pvBuf = NULL;
-       int rc = RTMemSaferAllocZEx(&pvBuf, cbAlloc, RTMEMSAFER_ALLOC_EX_FLAGS_DEFAULT);
+       int rc = RTMemSaferAllocZEx(&pvBuf, cbAlloc, 0);
        if (RT_SUCCESS(rc))
        {
-           /* Try to access memory. */
+           /* Fill it with random bytes. */
            RTRandBytes(pvBuf, cbAlloc);
…
            doMemSaferScramble(hTest, pvBuf, cbAlloc);

-   #if 0
-           /* Try to access memory after the allocation, should crash. */
-           size_t cbAllocAligned = RT_ALIGN_Z(cbAlloc, PAGE_SIZE);
-           *((uint8_t *)pvBuf + cbAllocAligned) = 0xcc;
-   #endif
            RTMemSaferFree(pvBuf, cbAlloc);
        }
…
+   static void doMemRealloc(RTTEST hTest)
+   {
+       RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "%u reallocations, growing by 1 byte\n", PAGE_SIZE * 2);
+       size_t cbAlloc = RTRandS32Ex(1, _16K);
+       void  *pvBuf   = NULL;
+       RTTESTI_CHECK_RC_OK_RETV(RTMemSaferAllocZEx(&pvBuf, cbAlloc, 0));
+       for (uint32_t i = 0; i <= PAGE_SIZE * 2; i++)
+       {
+           cbAlloc += 1;
+           RTTESTI_CHECK_RC_OK_RETV(RTMemSaferReallocZEx(cbAlloc - 1, pvBuf, cbAlloc, &pvBuf, 0));
+           memset(pvBuf, i & 0x7f, cbAlloc);
+       }
+       RTMemSaferFree(pvBuf, cbAlloc);
+
+       RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "100 random reallocations\n");
+       uint8_t chFiller = 0x42;
+       cbAlloc = 0;
+       pvBuf   = NULL;
+       for (uint32_t i = 1; i <= 100; i++)
+       {
+           uint32_t cbNew = RTRandS32Ex(1, _16K + (i / 4) * _16K);
+           RTTESTI_CHECK_RC_OK_RETV(RTMemSaferReallocZEx(cbAlloc, pvBuf, cbNew, &pvBuf, 0));
+
+           RTTESTI_CHECK(ASMMemIsAll8(pvBuf, RT_MIN(cbAlloc, cbNew), chFiller) == NULL);
+
+           chFiller += 0x31;
+           memset(pvBuf, chFiller, cbNew);
+           cbAlloc = cbNew;
+       }
+       RTTESTI_CHECK_RC_OK_RETV(RTMemSaferReallocZEx(cbAlloc, pvBuf, 0, &pvBuf, 0));
+       RTTESTI_CHECK(pvBuf == NULL);
+   }

    int main()
    {
        RTTEST hTest;
-       RTEXITCODE rcExit = RTTestInitAndCreate("memsafer", &hTest);
+       RTEXITCODE rcExit = RTTestInitAndCreate("tstRTMemSafer", &hTest);
        if (rcExit != RTEXITCODE_SUCCESS)
            return rcExit;
        RTTestBanner(hTest);
+   #ifdef VBOX
+       SUPR3Init(NULL);
+   #endif

-       doMemSaferAllocation(hTest);
+       /*
+        * Not using sub-tests here, just printing progress.
+        */
+       RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "20 random allocations\n");
+       for (uint32_t i = 0; i < 20; i++)
+           doMemSaferAllocation(hTest);
+
+       doMemRealloc(hTest);

        return RTTestSummaryAndDestroy(hTest);
Note: See TracChangeset for help on using the changeset viewer.