/** @file
SMM CPU misc functions for x64 arch specific.

Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution.  The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php

THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.

**/

#include "PiSmmCpuDxeSmm.h"

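//
// Base address and total size of the per-CPU GDT/TSS buffer allocated in
// InitGdt(), recorded here so that PatchGdtIdtMap() can later mark the
// buffer read-only and non-executable.
//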
EFI_PHYSICAL_ADDRESS  mGdtBuffer;
UINTN                 mGdtBufferSize;

/**
  Initialize IDT for SMM Stack Guard.

**/
VOID
EFIAPI
InitializeIDTSmmStackGuard (
  VOID
  )
{
  IA32_IDT_GATE_DESCRIPTOR  *IdtGate;

  //
  // If SMM Stack Guard feature is enabled, set the IST field of
  // the interrupt gate for Page Fault Exception to be 1
  //
  IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
  IdtGate += EXCEPT_IA32_PAGE_FAULT;
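  //
  // In the X64 IA32_IDT_GATE_DESCRIPTOR layout, the byte holding the 3-bit
  // IST index is the field named Reserved_0, so writing 1 selects IST1.
  //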
  IdtGate->Bits.Reserved_0 = 1;
}

/**
  Initialize Gdt for all processors.

  @param[in]   Cr3          CR3 value.
  @param[out]  GdtStepSize  The step size for GDT table.

  @return GdtBase for processor 0.
          GdtBase for processor X is: GdtBase + (GdtStepSize * X)
**/
VOID *
InitGdt (
  IN  UINTN  Cr3,
  OUT UINTN  *GdtStepSize
  )
{
  UINTN                    Index;
  IA32_SEGMENT_DESCRIPTOR  *GdtDescriptor;
  UINTN                    TssBase;
  UINTN                    GdtTssTableSize;
  UINT8                    *GdtTssTables;
  UINTN                    GdtTableStepSize;

  //
  // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
  // on each SMI entry.
  //
  GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
  mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
  GdtTssTables = (UINT8 *)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
  ASSERT (GdtTssTables != NULL);
  mGdtBuffer = (UINTN)GdtTssTables;
  GdtTableStepSize = GdtTssTableSize;

  for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
    CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID *)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);

    //
    // Fixup TSS descriptors
    //
    TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
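    //
    // The TSS area follows the copied GDT, and its 16-byte X64 TSS descriptor
    // occupies the last two 8-byte GDT entries, so step back two entries from
    // TssBase to reach it. Only the low 32 bits of the TSS base are patched
    // below; the upper 32 bits are left as in the template, which assumes the
    // buffer is located below 4GB.
    //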
    GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
    GdtDescriptor->Bits.BaseLow = (UINT16)(UINTN)TssBase;
    GdtDescriptor->Bits.BaseMid = (UINT8)((UINTN)TssBase >> 16);
    GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);

    if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
      //
      // Setup top of known good stack as IST1 for each processor.
      //
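      //
      // Each CPU's stack block is assumed to be laid out by the SMM stack
      // setup code as [known good stack page | guard page | SMM stack], so
      // the top of the known good stack is one page above the block's base.
      //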
      *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize);
    }
  }

  *GdtStepSize = GdtTableStepSize;
  return GdtTssTables;
}

/**
  This function sets the GDT/IDT buffers to be read-only (RO) and non-executable (XP).
**/
VOID
PatchGdtIdtMap (
  VOID
  )
{
  EFI_PHYSICAL_ADDRESS  BaseAddress;
  UINTN                 Size;

  //
  // GDT
  //
  DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - GDT:\n"));

  BaseAddress = mGdtBuffer;
  Size = ALIGN_VALUE (mGdtBufferSize, SIZE_4KB);
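  //
  // SmmSetMemoryAttributes() is assumed to operate at 4KB page granularity,
  // hence the buffer size is rounded up to SIZE_4KB before applying RO + XP.
  //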
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_RO
    );
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_XP
    );

  //
  // IDT
  //
  DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - IDT:\n"));

  BaseAddress = gcSmiIdtr.Base;
  Size = ALIGN_VALUE (gcSmiIdtr.Limit + 1, SIZE_4KB);
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_RO
    );
  SmmSetMemoryAttributes (
    BaseAddress,
    Size,
    EFI_MEMORY_XP
    );
}

/**
  Get Protected mode code segment from current GDT table.

  @return  Protected mode code segment value.
**/
UINT16
GetProtectedModeCS (
  VOID
  )
{
  IA32_DESCRIPTOR          GdtrDesc;
  IA32_SEGMENT_DESCRIPTOR  *GdtEntry;
  UINTN                    GdtEntryCount;
  UINT16                   Index;

  AsmReadGdtr (&GdtrDesc);
  GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
  GdtEntry = (IA32_SEGMENT_DESCRIPTOR *)GdtrDesc.Base;
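  //
  // Scan the GDT for a non-long-mode (L == 0) code segment (Type > 8).
  // Each GDT entry is 8 bytes, so the selector for entry Index is Index * 8.
  //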
  for (Index = 0; Index < GdtEntryCount; Index++) {
    if ((GdtEntry->Bits.L == 0) && (GdtEntry->Bits.Type > 8)) {
      break;
    }
    GdtEntry++;
  }
  ASSERT (Index != GdtEntryCount);
  return Index * 8;
}

/**
  Transfer an AP to the safe hlt-loop after it has finished restoring CPU features
  on the S3 resume path.

  @param[in]  ApHltLoopCode          The address of the safe hlt-loop function.
  @param[in]  TopOfStack             A pointer to the new stack to use for the ApHltLoopCode.
  @param[in]  NumberToFinishAddress  The address of the semaphore that counts finished APs.

**/
VOID
TransferApToSafeState (
  IN UINTN  ApHltLoopCode,
  IN UINTN  TopOfStack,
  IN UINTN  NumberToFinishAddress
  )
{
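  //
  // Switch the AP from 64-bit long mode to 32-bit protected mode using the
  // protected-mode CS found above, then jump to ApHltLoopCode with
  // NumberToFinishAddress as Context1 (Context2 is unused here) and
  // TopOfStack as the new stack. AsmDisablePaging64() does not return.
  //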
  AsmDisablePaging64 (
    GetProtectedModeCS (),
    (UINT32)ApHltLoopCode,
    (UINT32)NumberToFinishAddress,
    0,
    (UINT32)TopOfStack
    );
  //
  // It should never reach here
  //
  ASSERT (FALSE);
}