/** @file
Page Fault (#PF) handler for X64 processors

Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

#define PAGE_TABLE_PAGES            8
#define ACC_MAX_BIT                 BIT3
LIST_ENTRY                          mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
SPIN_LOCK                           mPFLock;
BOOLEAN                             m1GPageTableSupport = FALSE;

/**
  Check if 1-GByte pages are supported by the processor or not.

  @retval TRUE   1-GByte pages are supported.
  @retval FALSE  1-GByte pages are not supported.

**/
BOOLEAN
Is1GPageSupport (
  VOID
  )
{
  UINT32         RegEax;
  UINT32         RegEdx;

  AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
  if (RegEax >= 0x80000001) {
    AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
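    //
    // Note: CPUID.80000001H:EDX bit 26 (Page1GB) reports 1-GByte page support.
    //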
    if ((RegEdx & BIT26) != 0) {
      return TRUE;
    }
  }
  return FALSE;
}

/**
  Set sub-entries number in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      SubEntryNum  Sub-entries number based on 0:
                               0 means there is 1 sub-entry under this entry
                               0x1ff means there are 512 sub-entries under this entry

**/
VOID
SetSubEntriesNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               SubEntryNum
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
}

/**
  Return sub-entries number in entry.

  @param[in] Entry        Pointer to entry

  @return Sub-entries number based on 0:
          0 means there is 1 sub-entry under this entry
          0x1ff means there are 512 sub-entries under this entry
**/
UINT64
GetSubEntriesNum (
  IN UINT64            *Entry
  )
{
  //
  // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 52, 60);
}
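
//
// Illustration (based on usage elsewhere in this file): SmmInitPageTable () calls
// SetSubEntriesNum (PTEntry, 3) on the PML4 entry it fills, recording (zero-based)
// 4 sub-entries, i.e. the PDPT entries that map the first 4 GB. SmiDefaultPFHandler ()
// increments the count in an existing upper-level entry when it creates a new entry
// beneath it, and ReclaimPages () decrements it when a sub-entry is released, freeing
// the lower-level table itself once the stored count is already 0.
//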

/**
  Create PageTable for SMM use.

  @return The address of PML4 (to set CR3).

**/
UINT32
SmmInitPageTable (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS              Pages;
  UINT64                            *PTEntry;
  LIST_ENTRY                        *FreePage;
  UINTN                             Index;
  UINTN                             PageFaultHandlerHookAddress;
  IA32_IDT_GATE_DESCRIPTOR          *IdtEntry;

  //
  // Initialize spin lock
  //
  InitializeSpinLock (&mPFLock);

  m1GPageTableSupport = Is1GPageSupport ();
  //
  // Generate PAE page table for the first 4GB memory space
  //
  Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1, FALSE);

  //
  // Set IA32_PG_PMNT bit to mask these entries
  //
  PTEntry = (UINT64*)(UINTN)Pages;
  for (Index = 0; Index < 4; Index++) {
    PTEntry[Index] |= IA32_PG_PMNT;
  }

  //
  // Fill Page-Map-Level-4 (PML4) entry
  //
  PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1));
  *PTEntry = Pages + PAGE_ATTRIBUTE_BITS;
  ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
  //
  // Set sub-entries number
  //
  SetSubEntriesNum (PTEntry, 3);

  //
  // Add remaining pages to page pool
  //
  FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry));
  while ((UINTN)FreePage < Pages) {
    InsertTailList (&mPagePool, FreePage);
    FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
  }
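  //
  // Layout note (inferred from the code above): the PML4 occupies the page at
  // Pages - (PAGE_TABLE_PAGES + 1) pages, the next PAGE_TABLE_PAGES pages have
  // just been added to mPagePool as spare page-table pages for the page fault
  // handler, and the tables Gen4GPageTable () built for the first 4 GB start
  // at Pages (the PDPT referenced by the PML4 entry filled above).
  //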

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    //
    // Set our own Page Fault entry instead of the default one, because the SMM Profile
    // feature depends on the IRET instruction to do single stepping
    //
    PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
    IdtEntry  = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
    IdtEntry += EXCEPT_IA32_PAGE_FAULT;
    IdtEntry->Bits.OffsetLow      = (UINT16)PageFaultHandlerHookAddress;
    IdtEntry->Bits.Reserved_0     = 0;
    IdtEntry->Bits.GateType       = IA32_IDT_GATE_TYPE_INTERRUPT_32;
    IdtEntry->Bits.OffsetHigh     = (UINT16)(PageFaultHandlerHookAddress >> 16);
    IdtEntry->Bits.OffsetUpper    = (UINT32)(PageFaultHandlerHookAddress >> 32);
    IdtEntry->Bits.Reserved_1     = 0;
  } else {
    //
    // Register Smm Page Fault Handler
    //
    SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
  }

  //
  // Additional SMM IDT initialization for SMM stack guard
  //
  if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
    InitializeIDTSmmStackGuard ();
  }

  //
  // Return the address of PML4 (to set CR3)
  //
  return (UINT32)(UINTN)PTEntry;
}

/**
  Set access record in entry.

  @param[in, out] Entry        Pointer to entry
  @param[in]      Acc          Access record value

**/
VOID
SetAccNum (
  IN OUT UINT64               *Entry,
  IN     UINT64               Acc
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
}

/**
  Return access record in entry.

  @param[in] Entry        Pointer to entry

  @return Access record value.

**/
UINT64
GetAccNum (
  IN UINT64            *Entry
  )
{
  //
  // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
  //
  return BitFieldRead64 (*Entry, 9, 11);
}

/**
  Return and update the access record in entry.

  @param[in, out]  Entry    Pointer to entry

  @return Access record value.

**/
UINT64
GetAndUpdateAccNum (
  IN OUT UINT64      *Entry
  )
{
  UINT64         Acc;

  Acc = GetAccNum (Entry);
  if ((*Entry & IA32_PG_A) != 0) {
    //
    // If this entry has been accessed, clear the access flag in Entry and reset the access
    // record to the initial value 7; adding ACC_MAX_BIT makes the returned value larger than the others
    //
    *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
    SetAccNum (Entry, 0x7);
    return (0x7 + ACC_MAX_BIT);
  } else {
    if (Acc != 0) {
      //
      // If the access record is not the smallest value 0, decrease it by 1 and update the
      // access record field
      //
      SetAccNum (Entry, Acc - 1);
    }
  }
  return Acc;
}
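
//
// Aging scheme used by ReclaimPages () below: each call to GetAndUpdateAccNum ()
// makes a recently accessed entry report 0x7 + ACC_MAX_BIT (15) while resetting its
// stored record to 7, and makes an untouched entry's record decay toward 0. The
// reclaim pass therefore prefers to evict the page whose entry has gone unused for
// the most scans, an approximation of least-recently-used replacement.
//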

/**
  Reclaim free pages for PageFault handler.

  Search the whole page-table tree to find the leaf entry that has the smallest
  access record value. Insert the page pointed to by this leaf entry into the
  page pool, and check whether its upper-level entries also need to be inserted
  into the page pool.

**/
VOID
ReclaimPages (
  VOID
  )
{
  UINT64                       *Pml4;
  UINT64                       *Pdpt;
  UINT64                       *Pdt;
  UINTN                        Pml4Index;
  UINTN                        PdptIndex;
  UINTN                        PdtIndex;
  UINTN                        MinPml4;
  UINTN                        MinPdpt;
  UINTN                        MinPdt;
  UINT64                       MinAcc;
  UINT64                       Acc;
  UINT64                       SubEntriesNum;
  BOOLEAN                      PML4EIgnore;
  BOOLEAN                      PDPTEIgnore;
  UINT64                       *ReleasePageAddress;

  Pml4 = NULL;
  Pdpt = NULL;
  Pdt  = NULL;
  MinAcc  = (UINT64)-1;
  MinPml4 = (UINTN)-1;
  MinPdpt = (UINTN)-1;
  MinPdt  = (UINTN)-1;
  Acc     = 0;
  ReleasePageAddress = 0;

  //
  // First, find the leaf entry that has the smallest access record value
  //
  Pml4 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
  for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
    if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
      //
      // If the PML4 entry is not present or is masked, skip it
      //
      continue;
    }
    Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & gPhyMask);
    PML4EIgnore = FALSE;
    for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
      if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
        //
        // If the PDPT entry is not present or is masked, skip it
        //
        if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
          //
          // If the PDPT entry is masked, we will ignore checking the PML4 entry
          //
          PML4EIgnore = TRUE;
        }
        continue;
      }
      if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
        //
        // It's not a 1-GByte page entry, so it points to a page directory;
        // we will not check the PML4 entry any more
        //
        PML4EIgnore = TRUE;
        Pdt =  (UINT64*)(UINTN)(Pdpt[PdptIndex] & gPhyMask);
        PDPTEIgnore = FALSE;
        for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
          if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
            //
            // If the PD entry is not present or is masked, skip it
            //
            if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
              //
              // If the PD entry is masked, we will not check the PDPT entry any more
              //
              PDPTEIgnore = TRUE;
            }
            continue;
          }
          if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
            //
            // It's not a 2-MByte page entry, so this PD entry points to a 4-KByte page table;
            // check whether it has the smallest access record value
            //
            PDPTEIgnore = TRUE;
            Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
            if (Acc < MinAcc) {
              //
              // If the PD entry has the smallest access record value,
              // save the Page address to be released
              //
              MinAcc  = Acc;
              MinPml4 = Pml4Index;
              MinPdpt = PdptIndex;
              MinPdt  = PdtIndex;
              ReleasePageAddress = Pdt + PdtIndex;
            }
          }
        }
        if (!PDPTEIgnore) {
          //
          // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
          // it should only have entries that map 2-MByte pages
          //
          Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
          if (Acc < MinAcc) {
            //
            // If the PDPT entry has the smallest access record value,
            // save the Page address to be released
            //
            MinAcc  = Acc;
            MinPml4 = Pml4Index;
            MinPdpt = PdptIndex;
            MinPdt  = (UINTN)-1;
            ReleasePageAddress = Pdpt + PdptIndex;
          }
        }
      }
    }
    if (!PML4EIgnore) {
      //
      // If this PML4 entry has no PDPT entries pointing to 2-MByte pages,
      // it should only have entries that map 1-GByte pages
      //
      Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
      if (Acc < MinAcc) {
        //
        // If the PML4 entry has the smallest access record value,
        // save the Page address to be released
        //
        MinAcc  = Acc;
        MinPml4 = Pml4Index;
        MinPdpt = (UINTN)-1;
        MinPdt  = (UINTN)-1;
        ReleasePageAddress = Pml4 + Pml4Index;
      }
    }
  }
  //
  // Make sure one PML4/PDPT/PD entry is selected
  //
  ASSERT (MinAcc != (UINT64)-1);

  //
  // Secondly, insert the page pointed to by this entry into the page pool and clear this entry
  //
  InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & gPhyMask));
  *ReleasePageAddress = 0;

  //
  // Lastly, check whether this entry's upper-level entries need to be inserted
  // into the page pool or not
  //
  while (TRUE) {
    if (MinPdt != (UINTN)-1) {
      //
      // If a 4-KByte page table was released, check the PDPT entry
      //
      Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & gPhyMask);
      SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
      if (SubEntriesNum == 0) {
        //
        // Release the empty page directory table if there are no more 4-KByte page table
        // entries in it, and clear the PDPT entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & gPhyMask));
        Pdpt[MinPdpt] = 0;
        //
        // Go on checking the PML4 table
        //
        MinPdt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PDPT entry and exit
      //
      SetSubEntriesNum (Pdpt + MinPdpt, SubEntriesNum - 1);
      break;
    }
    if (MinPdpt != (UINTN)-1) {
      //
      // A page directory table was released (either directly or because it became empty);
      // check the PML4 entry
      //
      SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
      if (SubEntriesNum == 0) {
        //
        // Release the empty page directory pointer table if there are no more entries
        // in it, and clear the PML4 entry
        //
        InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & gPhyMask));
        Pml4[MinPml4] = 0;
        MinPdpt = (UINTN)-1;
        continue;
      }
      //
      // Update the sub-entries field in the PML4 entry and exit
      //
      SetSubEntriesNum (Pml4 + MinPml4, SubEntriesNum - 1);
      break;
    }
    //
    // The released page was referenced by a PML4 entry; there is no upper-level
    // entry to update, so exit
    //
    break;
  }
}

/**
  Allocate free Page for PageFault handler use.

  @return Page address.

**/
UINT64
AllocPage (
  VOID
  )
{
  UINT64                            RetVal;

  if (IsListEmpty (&mPagePool)) {
    //
    // If page pool is empty, reclaim the used pages and insert one into page pool
    //
    ReclaimPages ();
  }

  //
  // Get one free page and remove it from page pool
  //
  RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
  RemoveEntryList (mPagePool.ForwardLink);
  //
  // Clean this page and return
  //
  ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
  return RetVal;
}
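
//
// Typical use (see SmiDefaultPFHandler () below): a missing page-table page is
// created with "PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;". Callers
// in this file run with mPFLock held by SmiPFHandler (), so pool manipulation and
// page reclaim are serialized across processors.
//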

/**
  Page Fault handler for SMM use.

**/
VOID
SmiDefaultPFHandler (
  VOID
  )
{
  UINT64                            *PageTable;
  UINT64                            *Pml4;
  UINT64                            PFAddress;
  UINTN                             StartBit;
  UINTN                             EndBit;
  UINT64                            PTIndex;
  UINTN                             Index;
  SMM_PAGE_SIZE_TYPE                PageSize;
  UINTN                             NumOfPages;
  UINTN                             PageAttribute;
  EFI_STATUS                        Status;
  UINT64                            *UpperEntry;

  //
  // Set default SMM page attribute
  //
  PageSize = SmmPageSize2M;
  NumOfPages = 1;
  PageAttribute = 0;

  EndBit = 0;
  Pml4 = (UINT64*)(AsmReadCr3 () & gPhyMask);
  PFAddress = AsmReadCr2 ();

  Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
  //
  // If the platform does not support page table attributes, set the default SMM page attributes
  //
  if (Status != EFI_SUCCESS) {
    PageSize = SmmPageSize2M;
    NumOfPages = 1;
    PageAttribute = 0;
  }
  if (PageSize >= MaxSmmPageSizeType) {
    PageSize = SmmPageSize2M;
  }
  if (NumOfPages > 512) {
    NumOfPages = 512;
  }

  switch (PageSize) {
  case SmmPageSize4K:
    //
    // BIT12 to BIT20 is Page Table index
    //
    EndBit = 12;
    break;
  case SmmPageSize2M:
    //
    // BIT21 to BIT29 is Page Directory index
    //
    EndBit = 21;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  case SmmPageSize1G:
    if (!m1GPageTableSupport) {
      DEBUG ((EFI_D_ERROR, "1-GByte pages are not supported!"));
      ASSERT (FALSE);
    }
    //
    // BIT30 to BIT38 is Page Directory Pointer Table index
    //
    EndBit = 30;
    PageAttribute |= (UINTN)IA32_PG_PS;
    break;
  default:
    ASSERT (FALSE);
  }

  //
  // If execute-disable is enabled, set NX bit
  //
  if (mXdEnabled) {
    PageAttribute |= IA32_PG_NX;
  }

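  //
  // Walk note: each pass of the inner loop below indexes 9 bits of the faulting
  // address per level, starting with BIT39-BIT47 (PML4 index), then BIT30-BIT38
  // (PDPT), BIT21-BIT29 (PD), and BIT12-BIT20 (PT). The walk stops once StartBit
  // reaches EndBit, so the final entry written maps a 1-GByte, 2-MByte, or
  // 4-KByte page as selected by PageSize above.
  //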
  for (Index = 0; Index < NumOfPages; Index++) {
    PageTable  = Pml4;
    UpperEntry = NULL;
    for (StartBit = 39; StartBit > EndBit; StartBit -= 9) {
      PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
      if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
        //
        // If the entry is not present, allocate one page from page pool for it
        //
        PageTable[PTIndex] = AllocPage () | PAGE_ATTRIBUTE_BITS;
      } else {
        //
        // Save the upper entry address
        //
        UpperEntry = PageTable + PTIndex;
      }
      //
      // BIT9 to BIT11 of the entry are used to save the access record,
      // and the initial value is 7
      //
      PageTable[PTIndex] |= (UINT64)IA32_PG_A;
      SetAccNum (PageTable + PTIndex, 7);
      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask);
    }

    PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
    if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
      //
      // Check if the entry already exists; this issue may occur when page entries of
      // different sizes are created under the same upper-level entry
      //
      DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
      DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n"));
      ASSERT (FALSE);
    }
    //
    // Fill the new entry
    //
    PageTable[PTIndex] = (PFAddress & gPhyMask & ~((1ull << EndBit) - 1)) |
                         PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
    if (UpperEntry != NULL) {
      SetSubEntriesNum (UpperEntry, GetSubEntriesNum (UpperEntry) + 1);
    }
    //
    // Get the next page address if we need to create more page tables
    //
    PFAddress += (1ull << EndBit);
  }
}

/**
  The Page Fault handler wrapper for SMM use.

  @param  InterruptType    Defines the type of interrupt or exception that
                           occurred on the processor. This parameter is processor architecture specific.
  @param  SystemContext    A pointer to the processor context when
                           the interrupt occurred on the processor.
**/
VOID
EFIAPI
SmiPFHandler (
    IN EFI_EXCEPTION_TYPE   InterruptType,
    IN EFI_SYSTEM_CONTEXT   SystemContext
  )
{
  UINTN             PFAddress;

  ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);

  AcquireSpinLock (&mPFLock);

  PFAddress = AsmReadCr2 ();

  //
  // If a page fault occurs in the SMRAM range, it should be in an SMM stack guard page.
  //
  if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
      (PFAddress >= mCpuHotPlugData.SmrrBase) &&
      (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
    DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
    CpuDeadLoop ();
  }

  //
  // If a page fault occurs outside the SMRAM range
  //
  if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
      (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
    if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
      DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
      DEBUG_CODE (
        DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
      );
      CpuDeadLoop ();
    }
  }

  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
    SmmProfilePFHandler (
      SystemContext.SystemContextX64->Rip,
      SystemContext.SystemContextX64->ExceptionData
      );
  } else {
    SmiDefaultPFHandler ();
  }

  ReleaseSpinLock (&mPFLock);
}