path: root/UefiCpuPkg/CpuMpPei/CpuPaging.c
/** @file
  Basic paging support for the CPU to enable Stack Guard.

Copyright (c) 2018 - 2019, Intel Corporation. All rights reserved.<BR>

SPDX-License-Identifier: BSD-2-Clause-Patent

**/

#include <Register/Intel/Cpuid.h>
#include <Register/Intel/Msr.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/CpuLib.h>
#include <Library/BaseLib.h>
#include <Guid/MigratedFvInfo.h>

#include "CpuMpPei.h"

#define IA32_PG_P             BIT0
#define IA32_PG_RW            BIT1
#define IA32_PG_U             BIT2
#define IA32_PG_A             BIT5
#define IA32_PG_D             BIT6
#define IA32_PG_PS            BIT7
#define IA32_PG_NX            BIT63

#define PAGE_ATTRIBUTE_BITS   (IA32_PG_RW | IA32_PG_P)
#define PAGE_PROGATE_BITS     (IA32_PG_D | IA32_PG_A | IA32_PG_NX | IA32_PG_U |\
                               PAGE_ATTRIBUTE_BITS)

#define PAGING_PAE_INDEX_MASK       0x1FF
#define PAGING_4K_ADDRESS_MASK_64   0x000FFFFFFFFFF000ull
#define PAGING_2M_ADDRESS_MASK_64   0x000FFFFFFFE00000ull
#define PAGING_1G_ADDRESS_MASK_64   0x000FFFFFC0000000ull
#define PAGING_512G_ADDRESS_MASK_64 0x000FFF8000000000ull

typedef enum {
  PageNone = 0,
  PageMin  = 1,
  Page4K   = PageMin,
  Page2M   = 2,
  Page1G   = 3,
  Page512G = 4,
  PageMax  = Page512G
} PAGE_ATTRIBUTE;

typedef struct {
  PAGE_ATTRIBUTE   Attribute;
  UINT64           Length;
  UINT64           AddressMask;
  UINTN            AddressBitOffset;
  UINTN            AddressBitLength;
} PAGE_ATTRIBUTE_TABLE;

PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
  {PageNone,          0,                           0,  0, 0},
  {Page4K,     SIZE_4KB,   PAGING_4K_ADDRESS_MASK_64, 12, 9},
  {Page2M,     SIZE_2MB,   PAGING_2M_ADDRESS_MASK_64, 21, 9},
  {Page1G,     SIZE_1GB,   PAGING_1G_ADDRESS_MASK_64, 30, 9},
  {Page512G, SIZE_512GB, PAGING_512G_ADDRESS_MASK_64, 39, 9},
};

EFI_PEI_NOTIFY_DESCRIPTOR  mPostMemNotifyList[] = {
  {
    (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
    &gEfiPeiMemoryDiscoveredPpiGuid,
    MemoryDiscoveredPpiNotifyCallback
  }
};

/**
  The function will check if IA32 PAE is supported.

  @retval TRUE      IA32 PAE is supported.
  @retval FALSE     IA32 PAE is not supported.

**/
BOOLEAN
IsIa32PaeSupported (
  VOID
  )
{
  UINT32                    RegEax;
  CPUID_VERSION_INFO_EDX    RegEdx;

  AsmCpuid (CPUID_SIGNATURE, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_VERSION_INFO) {
    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
    if (RegEdx.Bits.PAE != 0) {
      return TRUE;
    }
  }

  return FALSE;
}

/**
  This API provides a way to allocate zero-initialized memory for page tables.

  @param  Pages                 The number of 4 KB pages to allocate.

  @return A pointer to the allocated buffer or NULL if allocation fails.

**/
VOID *
AllocatePageTableMemory (
  IN UINTN           Pages
  )
{
  VOID      *Address;

  Address = AllocatePages(Pages);
  if (Address != NULL) {
    ZeroMem(Address, EFI_PAGES_TO_SIZE (Pages));
  }

  return Address;
}

/**
  Get the physical address width supported by the current processor.

  @retval 32      If processor is in 32-bit mode.
  @retval 36-48   If processor is in 64-bit mode.

**/
UINTN
GetPhysicalAddressWidth (
  VOID
  )
{
  UINT32          RegEax;

  if (sizeof(UINTN) == 4) {
    return 32;
  }

  AsmCpuid(CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
  if (RegEax >= CPUID_VIR_PHY_ADDRESS_SIZE) {
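    //
    // CPUID leaf 80000008h reports the physical address width in EAX[7:0].
    // Cap the result at 48 bits, the address width covered by the 4-level
    // identity-mapped page table created in this module.
    //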
    AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &RegEax, NULL, NULL, NULL);
    RegEax &= 0xFF;
    if (RegEax > 48) {
      return 48;
    }

    return (UINTN)RegEax;
  }

  return 36;
}

/**
  Get the type of top level page table.

  @retval Page512G  PML4 paging.
  @retval Page1G    PAE paging.

**/
PAGE_ATTRIBUTE
GetPageTableTopLevelType (
  VOID
  )
{
  MSR_IA32_EFER_REGISTER      MsrEfer;

  MsrEfer.Uint64 = AsmReadMsr64 (MSR_CORE_IA32_EFER);
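  //
  // IA32_EFER.LMA is set when the processor runs in long mode, which uses
  // 4-level (PML4) paging; otherwise PAE paging with a 4-entry PDPT is used.
  //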

  return (MsrEfer.Bits.LMA == 1) ? Page512G : Page1G;
}

/**
  Return the page table entry matching the address.

  @param[in]   Address          The address to be checked.
  @param[out]  PageAttribute    The page attribute of the page entry.

  @return The page entry.
**/
VOID *
GetPageTableEntry (
  IN  PHYSICAL_ADDRESS                  Address,
  OUT PAGE_ATTRIBUTE                    *PageAttribute
  )
{
  INTN                  Level;
  UINTN                 Index;
  UINT64                *PageTable;
  UINT64                AddressEncMask;

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  PageTable = (UINT64 *)(UINTN)(AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
  for (Level = (INTN)GetPageTableTopLevelType (); Level > 0; --Level) {
    Index = (UINTN)RShiftU64 (Address, mPageAttributeTable[Level].AddressBitOffset);
    Index &= PAGING_PAE_INDEX_MASK;

    //
    // No mapping?
    //
    if (PageTable[Index] == 0) {
      *PageAttribute = PageNone;
      return NULL;
    }

    //
    // Page memory?
    //
    if ((PageTable[Index] & IA32_PG_PS) != 0 || Level == PageMin) {
      *PageAttribute = (PAGE_ATTRIBUTE)Level;
      return &PageTable[Index];
    }

    //
    // Page directory or table
    //
    PageTable = (UINT64 *)(UINTN)(PageTable[Index] &
                                  ~AddressEncMask &
                                  PAGING_4K_ADDRESS_MASK_64);
  }

  *PageAttribute = PageNone;
  return NULL;
}

/**
  This function splits one page entry into smaller page entries.

  @param[in]  PageEntry        The page entry to be split.
  @param[in]  PageAttribute    The page attribute of the page entry.
  @param[in]  SplitAttribute   How to split the page entry.
  @param[in]  Recursively      Whether to do the split recursively.

  @retval RETURN_SUCCESS            The page entry was split successfully.
  @retval RETURN_INVALID_PARAMETER  The target page attribute is invalid.
  @retval RETURN_OUT_OF_RESOURCES   Not enough resources to split the page entry.
**/
RETURN_STATUS
SplitPage (
  IN  UINT64                            *PageEntry,
  IN  PAGE_ATTRIBUTE                    PageAttribute,
  IN  PAGE_ATTRIBUTE                    SplitAttribute,
  IN  BOOLEAN                           Recursively
  )
{
  UINT64            BaseAddress;
  UINT64            *NewPageEntry;
  UINTN             Index;
  UINT64            AddressEncMask;
  PAGE_ATTRIBUTE    SplitTo;

  if (SplitAttribute == PageNone || SplitAttribute >= PageAttribute) {
    ASSERT (SplitAttribute != PageNone);
    ASSERT (SplitAttribute < PageAttribute);
    return RETURN_INVALID_PARAMETER;
  }

  NewPageEntry = AllocatePageTableMemory (1);
  if (NewPageEntry == NULL) {
    ASSERT (NewPageEntry != NULL);
    return RETURN_OUT_OF_RESOURCES;
  }

  //
  // Go one level down per split to keep the page table as compact as possible.
  //
  SplitTo = PageAttribute - 1;
  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[SplitTo].AddressMask;
  BaseAddress    = *PageEntry &
                   ~PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) &
                   mPageAttributeTable[PageAttribute].AddressMask;
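  //
  // Fill all 512 entries of the new page table. Each entry maps a region one
  // level smaller than the original entry and inherits its attribute bits.
  //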
  for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
    NewPageEntry[Index] = BaseAddress | AddressEncMask |
                          ((*PageEntry) & PAGE_PROGATE_BITS);

    if (SplitTo != PageMin) {
      NewPageEntry[Index] |= IA32_PG_PS;
    }

    if (Recursively && SplitTo > SplitAttribute) {
      SplitPage (&NewPageEntry[Index], SplitTo, SplitAttribute, Recursively);
    }

    BaseAddress += mPageAttributeTable[SplitTo].Length;
  }

  (*PageEntry) = (UINT64)(UINTN)NewPageEntry | AddressEncMask | PAGE_ATTRIBUTE_BITS;

  return RETURN_SUCCESS;
}

/**
  This function modifies the page attributes for the memory region specified
  by BaseAddress and Length from their current attributes to the attributes
  specified by Attributes.

  The caller must make sure BaseAddress and Length are aligned to a 4KB page boundary.

  @param[in]   BaseAddress      Start address of a memory region.
  @param[in]   Length           Size in bytes of the memory region.
  @param[in]   Attributes       Bit mask of attributes to modify.

  @retval RETURN_SUCCESS            The attributes were modified for the memory
                                    region.
  @retval RETURN_INVALID_PARAMETER  Length is zero; or
                                    Attributes specified an illegal combination
                                    of attributes that cannot be set together; or
                                    BaseAddress is not 4KB aligned.
  @retval RETURN_OUT_OF_RESOURCES   There are not enough system resources to modify
                                    the attributes.
  @retval RETURN_UNSUPPORTED        Cannot modify the attributes of given memory.

**/
RETURN_STATUS
EFIAPI
ConvertMemoryPageAttributes (
  IN  PHYSICAL_ADDRESS                  BaseAddress,
  IN  UINT64                            Length,
  IN  UINT64                            Attributes
  )
{
  UINT64                            *PageEntry;
  PAGE_ATTRIBUTE                    PageAttribute;
  RETURN_STATUS                     Status;
  EFI_PHYSICAL_ADDRESS              MaximumAddress;

  if (Length == 0 ||
      (BaseAddress & (SIZE_4KB - 1)) != 0 ||
      (Length & (SIZE_4KB - 1)) != 0) {

    ASSERT (Length > 0);
    ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
    ASSERT ((Length & (SIZE_4KB - 1)) == 0);

    return RETURN_INVALID_PARAMETER;
  }

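  //
  // Only memory below 4GB is handled by this PEI-phase implementation.
  //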
  MaximumAddress = (EFI_PHYSICAL_ADDRESS)MAX_UINT32;
  if (BaseAddress > MaximumAddress ||
      Length > MaximumAddress ||
      (BaseAddress > MaximumAddress - (Length - 1))) {
    return RETURN_UNSUPPORTED;
  }

  //
  // The logic below splits large pages down to 4K pages, one level at a time,
  // so that no memory is wasted on page tables that are not needed.
  //
  while (Length != 0) {
    PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
    if (PageEntry == NULL) {
      return RETURN_UNSUPPORTED;
    }

    if (PageAttribute != Page4K) {
      Status = SplitPage (PageEntry, PageAttribute, Page4K, FALSE);
      if (RETURN_ERROR (Status)) {
        return Status;
      }
      //
      // Do it again until the page is 4K.
      //
      continue;
    }

    //
    // Just take care of 'present' bit for Stack Guard.
    //
    if ((Attributes & IA32_PG_P) != 0) {
      *PageEntry |= (UINT64)IA32_PG_P;
    } else {
      *PageEntry &= ~((UINT64)IA32_PG_P);
    }

    //
    // Convert success, move to next
    //
    BaseAddress += SIZE_4KB;
    Length -= SIZE_4KB;
  }

  return RETURN_SUCCESS;
}

/**
  Get the maximum page size supported by the current processor.

  @param[in]   TopLevelType     The type of top level page entry.

  @retval Page1G     If processor supports 1G page and PML4.
  @retval Page2M     For all other situations.

**/
PAGE_ATTRIBUTE
GetMaxMemoryPage (
  IN  PAGE_ATTRIBUTE  TopLevelType
  )
{
  UINT32          RegEax;
  UINT32          RegEdx;

  if (TopLevelType == Page512G) {
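    //
    // CPUID.80000001h:EDX[26] (Page1GB) reports whether 1GB pages are
    // supported.
    //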
    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
    if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
      AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
      if ((RegEdx & BIT26) != 0) {
        return Page1G;
      }
    }
  }

  return Page2M;
}

/**
  Create PML4 or PAE page table.

  @return The address of page table.

**/
UINTN
CreatePageTable (
  VOID
  )
{
  RETURN_STATUS           Status;
  UINTN                   PhysicalAddressBits;
  UINTN                   NumberOfEntries;
  PAGE_ATTRIBUTE          TopLevelPageAttr;
  UINTN                   PageTable;
  PAGE_ATTRIBUTE          MaxMemoryPage;
  UINTN                   Index;
  UINT64                  AddressEncMask;
  UINT64                  *PageEntry;
  EFI_PHYSICAL_ADDRESS    PhysicalAddress;

  TopLevelPageAttr = (PAGE_ATTRIBUTE)GetPageTableTopLevelType ();
  PhysicalAddressBits = GetPhysicalAddressWidth ();
  NumberOfEntries = (UINTN)1 << (PhysicalAddressBits -
                                 mPageAttributeTable[TopLevelPageAttr].AddressBitOffset);

  PageTable = (UINTN) AllocatePageTableMemory (1);
  if (PageTable == 0) {
    return 0;
  }

  AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask);
  AddressEncMask &= mPageAttributeTable[TopLevelPageAttr].AddressMask;
  MaxMemoryPage = GetMaxMemoryPage (TopLevelPageAttr);
  PageEntry = (UINT64 *)PageTable;

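  //
  // Identity-map the supported physical address space. Each top-level entry
  // covers mPageAttributeTable[TopLevelPageAttr].Length bytes.
  //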
  PhysicalAddress = 0;
  for (Index = 0; Index < NumberOfEntries; ++Index) {
    *PageEntry = PhysicalAddress | AddressEncMask | PAGE_ATTRIBUTE_BITS;

    //
    // Split the top page table down to the maximum page size supported
    //
    if (MaxMemoryPage < TopLevelPageAttr) {
      Status = SplitPage(PageEntry, TopLevelPageAttr, MaxMemoryPage, TRUE);
      ASSERT_EFI_ERROR (Status);
    }

    if (TopLevelPageAttr == Page1G) {
      //
      // PDPTE[2:1] (PAE Paging) must be 0. SplitPage() might change them to 1.
      //
      *PageEntry &= ~(UINT64)(IA32_PG_RW | IA32_PG_U);
    }

    PageEntry += 1;
    PhysicalAddress += mPageAttributeTable[TopLevelPageAttr].Length;
  }


  return PageTable;
}

/**
  Set up the page tables and enable paging.

**/
VOID
EnablePaging (
  VOID
  )
{
  UINTN                       PageTable;

  PageTable = CreatePageTable ();
  ASSERT (PageTable != 0);
  if (PageTable != 0) {
    AsmWriteCr3(PageTable);
    AsmWriteCr4 (AsmReadCr4 () | BIT5);   // CR4.PAE
    AsmWriteCr0 (AsmReadCr0 () | BIT31);  // CR0.PG
  }
}

/**
  Get the base address of the current AP's stack.

  This function is called in the AP's context and assumes that the whole call
  stack (up to this function) consumed by the AP's wakeup procedure does not
  exceed 4KB.

  PcdCpuApStackSize must be configured with a value that takes the Guard page
  into account.

  @param[in,out] Buffer  The pointer to private data buffer.

**/
VOID
EFIAPI
GetStackBase (
  IN OUT VOID *Buffer
  )
{
  EFI_PHYSICAL_ADDRESS    StackBase;

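  //
  // Round the address of a local variable up to the next 4KB boundary to
  // approximate the top of the current stack page, then step back by the
  // configured stack size to get the stack base.
  //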
  StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)&StackBase;
  StackBase += BASE_4KB;
  StackBase &= ~((EFI_PHYSICAL_ADDRESS)BASE_4KB - 1);
  StackBase -= PcdGet32(PcdCpuApStackSize);

  *(EFI_PHYSICAL_ADDRESS *)Buffer = StackBase;
}

/**
  Set up a stack Guard page at the stack base of each processor. The BSP and
  APs use different methods to get their stack base addresses.

**/
VOID
SetupStackGuardPage (
  VOID
  )
{
  EFI_PEI_HOB_POINTERS        Hob;
  EFI_PHYSICAL_ADDRESS        StackBase;
  UINTN                       NumberOfProcessors;
  UINTN                       Bsp;
  UINTN                       Index;

  //
  // One extra page at the bottom of the stack is needed for the Guard page.
  //
  if (PcdGet32(PcdCpuApStackSize) <= EFI_PAGE_SIZE) {
    DEBUG ((DEBUG_ERROR, "PcdCpuApStackSize is not big enough for Stack Guard!\n"));
    ASSERT (FALSE);
  }

  MpInitLibGetNumberOfProcessors(&NumberOfProcessors, NULL);
  MpInitLibWhoAmI (&Bsp);
  for (Index = 0; Index < NumberOfProcessors; ++Index) {
    StackBase = 0;

    if (Index == Bsp) {
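      //
      // The BSP stack base is recorded in the memory allocation HOB tagged
      // with gEfiHobMemoryAllocStackGuid.
      //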
      Hob.Raw = GetHobList ();
      while ((Hob.Raw = GetNextHob (EFI_HOB_TYPE_MEMORY_ALLOCATION, Hob.Raw)) != NULL) {
        if (CompareGuid (&gEfiHobMemoryAllocStackGuid,
                         &(Hob.MemoryAllocationStack->AllocDescriptor.Name))) {
          StackBase = Hob.MemoryAllocationStack->AllocDescriptor.MemoryBaseAddress;
          break;
        }
        Hob.Raw = GET_NEXT_HOB (Hob);
      }
    } else {
      //
      // Ask the AP to return its stack base address.
      //
      MpInitLibStartupThisAP(GetStackBase, Index, NULL, 0, (VOID *)&StackBase, NULL);
    }
    ASSERT (StackBase != 0);
    //
    // Set Guard page at stack base address.
    //
    ConvertMemoryPageAttributes(StackBase, EFI_PAGE_SIZE, 0);
    DEBUG ((DEBUG_INFO, "Stack Guard set at %lx [cpu%lu]!\n",
            (UINT64)StackBase, (UINT64)Index));
  }

  //
  // Publish the changes of page table.
  //
  CpuFlushTlb ();
}

/**
  Enable and set up the stack guard for each processor if PcdCpuStackGuard is
  set to TRUE.

  Doing this in the memory-discovered callback ensures that the Stack Guard
  feature covers as much PEI code as possible.

  @param[in] PeiServices          General purpose services available to every PEIM.
  @param[in] NotifyDescriptor     The notification structure this PEIM registered on install.
  @param[in] Ppi                  The memory discovered PPI.  Not used.

  @retval EFI_SUCCESS             The function completed successfully.
  @retval others                  An error occurred during MP initialization.
**/
EFI_STATUS
EFIAPI
MemoryDiscoveredPpiNotifyCallback (
  IN EFI_PEI_SERVICES           **PeiServices,
  IN EFI_PEI_NOTIFY_DESCRIPTOR  *NotifyDescriptor,
  IN VOID                       *Ppi
  )
{
  EFI_STATUS              Status;
  BOOLEAN                 InitStackGuard;
  BOOLEAN                 InterruptState;
  EDKII_MIGRATED_FV_INFO  *MigratedFvInfo;
  EFI_PEI_HOB_POINTERS    Hob;

  if (PcdGetBool (PcdMigrateTemporaryRamFirmwareVolumes)) {
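    //
    // Relocate the GDT into permanent memory so that it remains valid after
    // temporary RAM (and the original GDT location) is torn down.
    //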
    InterruptState = SaveAndDisableInterrupts ();
    Status = MigrateGdt ();
    ASSERT_EFI_ERROR (Status);
    SetInterruptState (InterruptState);
  }

  //
  // Paging must be set up first. Otherwise, the exception TSS set up later
  // during MP initialization will not contain paging information, and the
  // task switch (used for stack switching) will fail.
  //
  InitStackGuard = FALSE;
  Hob.Raw = NULL;
  if (IsIa32PaeSupported ()) {
    Hob.Raw  = GetFirstGuidHob (&gEdkiiMigratedFvInfoGuid);
    InitStackGuard = PcdGetBool (PcdCpuStackGuard);
  }

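  //
  // Enable paging if Stack Guard is requested, or if there are migrated FVs
  // whose original ranges must be marked not-present below.
  //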
  if (InitStackGuard || Hob.Raw != NULL) {
    EnablePaging ();
  }

  Status = InitializeCpuMpWorker ((CONST EFI_PEI_SERVICES **)PeiServices);
  ASSERT_EFI_ERROR (Status);

  if (InitStackGuard) {
    SetupStackGuardPage ();
  }

  while (Hob.Raw != NULL) {
    MigratedFvInfo = GET_GUID_HOB_DATA (Hob);

    //
    // Mark the original FV range not-present so that any access to SPI flash
    // after NEM is disabled triggers a #PF exception, avoiding a potential
    // vulnerability.
    //
    ConvertMemoryPageAttributes (MigratedFvInfo->FvOrgBase, MigratedFvInfo->FvLength, 0);

    Hob.Raw = GET_NEXT_HOB (Hob);
    Hob.Raw = GetNextGuidHob (&gEdkiiMigratedFvInfoGuid, Hob.Raw);
  }
  CpuFlushTlb ();

  return Status;
}