VirtualBox

source: vbox/trunk/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/CpuMpPei/CpuPaging.c@90304

Last change on this file since 90304 was 89983, checked in by vboxsync, 4 years ago

Devices/EFI: Merge edk-stable202105 and openssl 1.1.1j and make it build, bugref:4643

  • Property svn:eol-style set to native
File size: 19.0 KB
 
/** @file
  Basic paging support for the CPU to enable Stack Guard.

Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Register/Intel/Cpuid.h>
#include <Register/Intel/Msr.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/CpuLib.h>
#include <Library/BaseLib.h>
#include <Guid/MigratedFvInfo.h>
#ifdef VBOX
# define IN_RING0
# include <iprt/asm.h>
#endif

#include "CpuMpPei.h"

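//
// IA-32 page table entry bits: Present, Read/Write, User, Accessed, Dirty,
// Page Size (entry maps a large page), and eXecute Disable (bit 63).
//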
#define IA32_PG_P   BIT0
#define IA32_PG_RW  BIT1
#define IA32_PG_U   BIT2
#define IA32_PG_A   BIT5
#define IA32_PG_D   BIT6
#define IA32_PG_PS  BIT7
#define IA32_PG_NX  BIT63

#define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P)
#define PAGE_PROGATE_BITS   (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                             PAGE_ATTRIBUTE_BITS)

#define PAGING_PAE_INDEX_MASK       0x1FF
#define PAGING_4K_ADDRESS_MASK_64   0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64   0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64   0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull

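//
// Supported paging levels. Each level decodes 9 address bits, so an entry at
// one level references a table of 512 entries at the next level down;
// Page512G corresponds to a PML4 entry, Page4K to a 4KB PTE.
//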
typedef enum {
  PageNone = 0,
  PageMin  = 1,
  Page4K   = PageMin,
  Page2M   = 2,
  Page1G   = 3,
  Page512G = 4,
  PageMax  = Page512G
} PAGE_ATTRIBUTE;

typedef struct {
  PAGE_ATTRIBUTE  Attribute;
  UINT64          Length;
  UINT64          AddressMask;
  UINTN           AddressBitOffset;
  UINTN           AddressBitLength;
} PAGE_ATTRIBUTE_TABLE;

PAGE_ATTRIBUTE_TABLE  mPageAttributeTable[] = {
  {PageNone,  0,          0,                           0,  0},
  {Page4K,    SIZE_4KB,   PAGING_4K_ADDRESS_MASK_64,   12, 9},
  {Page2M,    SIZE_2MB,   PAGING_2M_ADDRESS_MASK_64,   21, 9},
  {Page1G,    SIZE_1GB,   PAGING_1G_ADDRESS_MASK_64,   30, 9},
  {Page512G,  SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};

EFI_PEI_NOTIFY_DESCRIPTOR  mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};

#ifdef VBOX
/**
  Safe page table entry write function, make 104% sure the compiler won't
  split up the access (fatal if modifying entries for current code or data).

  @param[in] PageEntry         The page table entry to modify.
  @param[in] CurrentPageEntry  The old page table value (for cmpxchg8b).
  @param[in] NewPageEntry      What to write.
**/
static VOID SafePageTableEntryWrite64 (UINT64 volatile *PageEntry, UINT64 CurrentPageEntry, UINT64 NewPageEntry)
{
# ifdef VBOX
  ASMAtomicWriteU64(PageEntry, NewPageEntry); RT_NOREF(CurrentPageEntry);
# else
  for (;;) {
    UINT64 CurValue = InterlockedCompareExchange64(PageEntry, CurrentPageEntry, NewPageEntry);
    if (CurValue == CurrentPageEntry)
      return;
    CurrentPageEntry = CurValue;
  }
# endif
}
#endif

/**
  The function will check if IA32 PAE is supported.

  @retval TRUE   IA32 PAE is supported.
  @retval FALSE  IA32 PAE is not supported.

**/
BOOLEAN
IsIa32PaeSupported (
  VOID
  )
{
  UINT32                  RegEax;
  CPUID_VERSION_INFO_EDX  RegEdx;

  AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_VERSION_INFO) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
    if (RegEdx.Bits.PAE != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  This API provides a way to allocate memory for page table.

  @param Pages  The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN  Pages
  )
{
  VOID  *Address;

  Address = AllocatePages(Pages);
  if (Address != NULL) {
    ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));
  }

  return Address;
}

/**
  Get the address width supported by current processor.

  @retval 32     If processor is in 32-bit mode.
  @retval 36-48  If processor is in 64-bit mode.

**/
UINTN
GetPhysicalAddressWidth (
  VOID
  )
{
  UINT32  RegEax;

  if (sizeof(UINTN) == 4) {
    return 32;
  }

  AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
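    //
    // CPUID leaf 80000008h returns the physical address width in EAX[7:0];
    // cap it at 48 bits, the widest space a 4-level page table can map.
    //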
    RegEax &= 0xFF;
    if (RegEax > 48) {
      return 48;
    }

    return (UINTN)RegEax;
  }

  return 36;
}

/**
  Get the type of top level page table.

  @retval Page512G  PML4 paging.
  @retval Page1G    PAE paging.

**/
PAGE_ATTRIBUTE
GetPageTableTopLevelType (
  VOID
  )
{
  MSR_IA32_EFER_REGISTER  MsrEfer;

  MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);

  return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;
}

/**
  Return page table entry matching the address.

  @param[in]  Address        The address to be checked.
  @param[out] PageAttribute  The page attribute of the page entry.

  @return The page entry.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS  Address,
  OUT PAGE_ATTRIBUTE    *PageAttribute
  )
{
  INTN    Level;
  UINTN   Index;
  UINT64  *PageTable;
  UINT64  AddressEncMask;

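  //
  // PcdPteMemoryEncryptionAddressOrMask carries the memory-encryption bit
  // (e.g. for AMD SEV) that must be masked off when following table links.
  //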
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory?
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  *PageAttribute = PageNone;
  return NULL;
}

/**
  This function splits one page entry into smaller page entries.

  @param[in] PageEntry       The page entry to be split.
  @param[in] PageAttribute   The page attribute of the page entry.
  @param[in] SplitAttribute  How to split the page entry.
  @param[in] Recursively     Do the split recursively or not.

  @retval RETURN_SUCCESS            The page entry is split.
  @retval RETURN_INVALID_PARAMETER  If target page attribute is invalid.
  @retval RETURN_OUT_OF_RESOURCES   No resource to split page entry.
**/
RETURN_STATUS
SplitPage (
#ifdef VBOX
  IN UINT64 volatile  *PageEntry,
#else
  IN UINT64           *PageEntry,
#endif
  IN PAGE_ATTRIBUTE   PageAttribute,
  IN PAGE_ATTRIBUTE   SplitAttribute,
  IN BOOLEAN          Recursively
  )
{
#ifdef VBOX
  UINT64          CurrentPageEntry;
#endif
  UINT64          BaseAddress;
  UINT64          *NewPageEntry;
  UINTN           Index;
  UINT64          AddressEncMask;
  PAGE_ATTRIBUTE  SplitTo;

  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // One level down each step to achieve more compact page table.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
#ifdef VBOX
  CurrentPageEntry = *PageEntry;
  BaseAddress = CurrentPageEntry &
#else
  BaseAddress = *PageEntry &
#endif
                ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                mPageAttributeTable[PageAttribute].AddressMask;
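  //
  // Populate the new, finer-grained table: each child entry inherits the
  // parent's attribute bits (PAGE_PROGATE_BITS), so the effective mapping
  // is unchanged by the split.
  //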
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
#ifdef VBOX
                          (CurrentPageEntry & PAGE_PROGATE_BITS);
#else
                          ((*PageEntry) & PAGE_PROGATE_BITS);
#endif

    if (SplitTo != PageMin) {
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

#ifdef VBOX
  SafePageTableEntryWrite64 (PageEntry, CurrentPageEntry,
                             (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS);
#else
  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;
#endif

  return RETURN_SUCCESS;
}

/**
  This function modifies the page attributes for the memory region specified
  by BaseAddress and Length from their current attributes to the attributes
  specified by Attributes.

  Caller should make sure BaseAddress and Length are at page boundaries.

  @param[in] BaseAddress  Start address of a memory region.
  @param[in] Length       Size in bytes of the memory region.
  @param[in] Attributes   Bit mask of attributes to modify.

  @retval RETURN_SUCCESS            The attributes were modified for the memory
                                    region.
  @retval RETURN_INVALID_PARAMETER  Length is zero; or,
                                    Attributes specified an illegal combination
                                    of attributes that cannot be set together; or
                                    Address is not 4KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   There are not enough system resources to modify
                                    the attributes.
  @retval RETURN_UNSUPPORTED        Cannot modify the attributes of given memory.

**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
  IN PHYSICAL_ADDRESS  BaseAddress,
  IN UINT64            Length,
  IN UINT64            Attributes
  )
{
#ifdef VBOX
  UINT64 volatile       *PageEntry;
  UINT64                CurrentPageEntry;
#else
  UINT64                *PageEntry;
#endif
  PAGE_ATTRIBUTE        PageAttribute;
  RETURN_STATUS         Status;
  EFI_PHYSICAL_ADDRESS  MaximumAddress;

  if (Length == 0 ||
      (BaseAddress & (SIZE_4KB - 1)) != 0 ||
      (Length & (SIZE_4KB - 1)) != 0) {

    ASSERT (Length > 0);
    ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
    ASSERT ((Length & (SIZE_4KB - 1)) == 0);

    return RETURN_INVALID_PARAMETER;
  }

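  //
  // Only the identity-mapped range below 4GB is handled here; ranges at or
  // above 4GB are rejected as unsupported.
  //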
  MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
  if (BaseAddress > MaximumAddress ||
      Length > MaximumAddress ||
      (BaseAddress > MaximumAddress - (Length - 1))) {
    return RETURN_UNSUPPORTED;
  }

  //
  // The logic below splits 2M/1G pages into 4K pages only as needed, so
  // page-table memory is not wasted.
  //
  while (Length != 0) {
    PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
    if (PageEntry == NULL) {
      return RETURN_UNSUPPORTED;
    }

    if (PageAttribute != Page4K) {
      Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
      if (RETURN_ERROR (Status)) {
        return Status;
      }
      //
      // Do it again until the page is 4K.
      //
      continue;
    }

    //
    // Just take care of 'present' bit for Stack Guard.
    //
#ifdef VBOX
    CurrentPageEntry = *PageEntry;
    if ((CurrentPageEntry & IA32_PG_P) != (Attributes & IA32_PG_P))
      SafePageTableEntryWrite64 (PageEntry, CurrentPageEntry,
                                 (CurrentPageEntry & ~(UINT64)IA32_PG_P) | (Attributes & IA32_PG_P));
#else
    if ((Attributes & IA32_PG_P) != 0) {
      *PageEntry |= (UINT64)IA32_PG_P;
    } else {
      *PageEntry &= ~((UINT64)IA32_PG_P);
    }
#endif

    //
    // Convert success, move to next
    //
    BaseAddress += SIZE_4KB;
    Length -= SIZE_4KB;
  }

  return RETURN_SUCCESS;
}

/**
  Get maximum size of page memory supported by current processor.

  @param[in] TopLevelType  The type of top level page entry.

  @retval Page1G  If processor supports 1G page and PML4.
  @retval Page2M  For all other situations.

**/
PAGE_ATTRIBUTE
GetMaxMemoryPage (
  IN PAGE_ATTRIBUTE  TopLevelType
  )
{
  UINT32  RegEax;
  UINT32  RegEdx;

  if (TopLevelType == Page512G) {
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
      AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
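      //
      // CPUID leaf 80000001h EDX bit 26 reports 1GB page support.
      //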
      if ((RegEdx & BIT26) != 0) {
        return Page1G;
      }
    }
  }

  return Page2M;
}

/**
  Create PML4 or PAE page table.

  @return The address of page table.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS         Status;
  UINTN                 PhysicalAddressBits;
  UINTN                 NumberOfEntries;
  PAGE_ATTRIBUTE        TopLevelPageAttr;
  UINTN                 PageTable;
  PAGE_ATTRIBUTE        MaxMemoryPage;
  UINTN                 Index;
  UINT64                AddressEncMask;
  UINT64                *PageEntry;
  EFI_PHYSICAL_ADDRESS  PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
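  //
  // Number of top-level entries needed to identity-map the whole physical
  // address space: with PAE paging this is 2^(32 - 30) = 4 PDPT entries,
  // with 4-level paging 2^(PhysicalAddressBits - 39) PML4 entries.
  //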
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                    mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry = (UINT64 *)PageTable;

  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split the top page table down to the maximum page size supported
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }

  return PageTable;
}

/**
  Setup page tables and make them work.

**/
VOID
EnablePaging (
  VOID
  )
{
  UINTN  PageTable;

  PageTable = CreatePageTable ();
  ASSERT (PageTable != 0);
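  //
  // Install the new hierarchy via CR3 first, then enable PAE, and finally
  // set CR0.PG to turn on address translation.
  //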
  if (PageTable != 0) {
    AsmWriteCr3(PageTable);
    AsmWriteCr4 (AsmReadCr4 () | BIT5);   // CR4.PAE
    AsmWriteCr0 (AsmReadCr0 () | BIT31);  // CR0.PG
  }
}

/**
  Get the base address of current AP's stack.

  This function is called in AP's context and assumes that whole calling stacks
  (till this function) consumed by AP's wakeup procedure will not exceed 4KB.

  PcdCpuApStackSize must be configured with value taking the Guard page into
  account.

  @param[in,out] Buffer  The pointer to private data buffer.

**/
VOID
EFIAPI
GetStackBase (
  IN OUT VOID  *Buffer
  )
{
  EFI_PHYSICAL_ADDRESS  StackBase;

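  //
  // &StackBase approximates the current stack pointer. Round it up to the
  // next 4KB boundary (the wakeup path uses less than 4KB of stack, so this
  // is the stack top), then step back PcdCpuApStackSize to reach the base.
  //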
  StackBase  = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
  StackBase += BASE_4KB;
  StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
  StackBase -= PcdGet32(PcdCpuApStackSize);

  *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
}

/**
  Setup stack Guard page at the stack base of each processor. BSP and APs have
  different ways to get the stack base address.

**/
VOID
SetupStackGuardPage (
  VOID
  )
{
  EFI_PEI_HOB_POINTERS  Hob;
  EFI_PHYSICAL_ADDRESS  StackBase;
  UINTN                 NumberOfProcessors;
  UINTN                 Bsp;
  UINTN                 Index;

  //
  // One extra page at the bottom of the stack is needed for Guard page.
  //
  if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
    DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
    ASSERT (FALSE);
  }

  MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
  MpInitLibWhoAmI (&Bsp);
  for (Index = 0; Index < NumberOfProcessors; ++Index) {
    StackBase = 0;

    if (Index == Bsp) {
      Hob.Raw = GetHobList ();
      while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
        if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
                         &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
          StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
          break;
        }
        Hob.Raw = GET_NEXT_HOB (Hob);
      }
    } else {
      //
      // Ask the AP to return its stack base address.
      //
      MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
    }
    ASSERT (StackBase != 0);
    //
    // Set Guard page at stack base address. Passing Attributes == 0 clears
    // the 'present' bit, so any access into the guard page raises #PF.
    //
    ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
    DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
            (UINT64)StackBase, (UINT64)Index));
  }

  //
  // Publish the changes of page table.
  //
  CpuFlushTlb ();
}

/**
  Enable/setup stack guard for each processor if PcdCpuStackGuard is set to TRUE.

  Doing this in the memory-discovered callback ensures that the Stack Guard
  feature covers as much PEI code as possible.

  @param[in] PeiServices       General purpose services available to every PEIM.
  @param[in] NotifyDescriptor  The notification structure this PEIM registered on install.
  @param[in] Ppi               The memory discovered PPI. Not used.

  @retval EFI_SUCCESS  The function completed successfully.
  @retval others       There was an error in MP initialization.
**/
EFI_STATUS
EFIAPI
MemoryDiscoveredPpiNotifyCallback (
  IN EFI_PEI_SERVICES           **PeiServices,
  IN EFI_PEI_NOTIFY_DESCRIPTOR  *NotifyDescriptor,
  IN VOID                       *Ppi
  )
{
  EFI_STATUS              Status;
  BOOLEAN                 InitStackGuard;
  EDKII_MIGRATED_FV_INFO  *MigratedFvInfo;
  EFI_PEI_HOB_POINTERS    Hob;

  //
  // Paging must be setup first. Otherwise the exception TSS setup during MP
  // initialization later will not contain paging information and then fail
  // the task switch (for the sake of stack switch).
  //
  InitStackGuard = FALSE;
  Hob.Raw = NULL;
  if (IsIa32PaeSupported ()) {
    Hob.Raw = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
    InitStackGuard = PcdGetBool (PcdCpuStackGuard);
  }

  if (InitStackGuard || Hob.Raw != NULL) {
    EnablePaging ();
  }

  Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
  ASSERT_EFI_ERROR (Status);

  if (InitStackGuard) {
    SetupStackGuardPage ();
  }

  while (Hob.Raw != NULL) {
    MigratedFvInfo = GET_GUID_HOB_DATA (Hob);

    //
    // Mark the migrated FV's original range not-present, so that any code
    // accessing SPI after NEM is disabled generates a #PF exception,
    // avoiding a potential vulnerability.
    //
    ConvertMemoryPageAttributes (MigratedFvInfo->FvOrgBase, MigratedFvInfo->FvLength, 0);

    Hob.Raw = GET_NEXT_HOB (Hob);
    Hob.Raw = GetNextGuidHob (&gEdkiiMigratedFvInfoGuid, Hob.Raw);
  }
  CpuFlushTlb ();

  return Status;
}