/** @file
*  File managing the MMU for ARMv8 architecture
*
*  Copyright (c) 2011-2014, ARM Limited. All rights reserved.
*  Copyright (c) 2016, Linaro Limited. All rights reserved.
*
*  This program and the accompanying materials
*  are licensed and made available under the terms and conditions of the BSD License
*  which accompanies this distribution. The full text of the license may be found at
*  http://opensource.org/licenses/bsd-license.php
*
*  THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
*  WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/

#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/CacheMaintenanceLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/ArmMmuLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>

// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID    ((UINT32)~0)

STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
  )
{
  switch (Attributes) {
  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;

  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;

  // Uncached and device mappings are treated as outer shareable by default.
  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;

  default:
    ASSERT (0);
  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
    if (ArmReadCurrentEL () == AARCH64_EL2)
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
    else
      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
  }
}
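
//
// Note: the EL2 translation regime has a single execute-never bit (XN) in the
// descriptor, whereas the EL1&0 regime provides separate UXN and PXN bits.
// This is why the device memory case above checks the current exception level
// before marking the mapping non-executable.
//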

UINT64
PageAttributeToGcdAttribute (
  IN UINT64  PageAttributes
  )
{
  UINT64  GcdAttributes;

  switch (PageAttributes & TT_ATTR_INDX_MASK) {
  case TT_ATTR_INDX_DEVICE_MEMORY:
    GcdAttributes = EFI_MEMORY_UC;
    break;
  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
    GcdAttributes = EFI_MEMORY_WC;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
    GcdAttributes = EFI_MEMORY_WT;
    break;
  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
    GcdAttributes = EFI_MEMORY_WB;
    break;
  default:
    DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
    ASSERT (0);
    // The Global Coherency Domain (GCD) value is defined as a bit set.
    // Returning 0 means no attribute has been set.
    GcdAttributes = 0;
  }

  // Determine protection attributes
  if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
    // Read only cases map to write-protect
    GcdAttributes |= EFI_MEMORY_WP;
  }

  // Process eXecute Never attribute
  if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0) {
    GcdAttributes |= EFI_MEMORY_XP;
  }

  return GcdAttributes;
}

ARM_MEMORY_REGION_ATTRIBUTES
GcdAttributeToArmAttribute (
  IN UINT64  GcdAttributes
  )
{
  switch (GcdAttributes & 0xFF) {
  case EFI_MEMORY_UC:
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  case EFI_MEMORY_WC:
    return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
  case EFI_MEMORY_WT:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
  case EFI_MEMORY_WB:
    return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
  default:
    DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));
    ASSERT (0);
    return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
  }
}
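
//
// Note: GcdAttributeToArmAttribute () only considers the cacheability bits in
// the low byte of the GCD attributes; permission bits such as EFI_MEMORY_WP or
// EFI_MEMORY_XP are not translated back, so this conversion is not a strict
// inverse of PageAttributeToGcdAttribute ().
//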

#define MIN_T0SZ        16
#define BITS_PER_LEVEL  9

VOID
GetRootTranslationTableInfo (
  IN UINTN    T0SZ,
  OUT UINTN   *TableLevel,
  OUT UINTN   *TableEntryCount
  )
{
  // Get the level of the root table
  if (TableLevel) {
    *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
  }

  if (TableEntryCount) {
    *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
  }
}
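
//
// Worked example for the formulas above (4 KB granule): with T0SZ == 28 (a
// 36-bit virtual address space) the root table sits at level
// (28 - 16) / 9 == 1 and holds 1 << (9 - (28 - 16) % 9) == 64 entries;
// with T0SZ == 24 (40-bit) the root is a level 0 table with 2 entries.
//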

STATIC
VOID
ReplaceLiveEntry (
  IN  UINT64  *Entry,
  IN  UINT64  Value
  )
{
  if (!ArmMmuEnabled ()) {
    *Entry = Value;
  } else {
    ArmReplaceLiveTranslationEntry (Entry, Value);
  }
}
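
//
// Rationale: while the MMU is still off a translation table entry can simply
// be stored, but once the tables are live the entry may be in active use by
// the MMU, so the update is delegated to the ArmReplaceLiveTranslationEntry ()
// assembly helper. ArmMmuBaseLibConstructor () below cleans that helper to the
// point of coherency because it may be invoked with the MMU off.
//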

STATIC
VOID
LookupAddresstoRootTable (
  IN  UINT64  MaxAddress,
  OUT UINTN   *T0SZ,
  OUT UINTN   *TableEntryCount
  )
{
  UINTN  TopBit;

  // Check the parameters are not NULL
  ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));

  // Look for the highest bit set in MaxAddress
  for (TopBit = 63; TopBit != 0; TopBit--) {
    if ((1ULL << TopBit) & MaxAddress) {
      // MaxAddress top bit is found
      TopBit = TopBit + 1;
      break;
    }
  }
  ASSERT (TopBit != 0);

  // Calculate T0SZ from the top bit of the MaxAddress
  *T0SZ = 64 - TopBit;

  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}
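
//
// Worked example: for MaxAddress == 0xFFFFFFFF (a 4 GB space) the highest set
// bit is bit 31, so TopBit becomes 32 and T0SZ == 64 - 32 == 32, which yields
// a level 1 root table with 4 entries.
//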

STATIC
UINT64*
GetBlockEntryListFromAddress (
  IN  UINT64        *RootTable,
  IN  UINT64        RegionStart,
  OUT UINTN         *TableLevel,
  IN OUT UINT64     *BlockEntrySize,
  OUT UINT64        **LastBlockEntry
  )
{
  UINTN   RootTableLevel;
  UINTN   RootTableEntryCount;
  UINT64  *TranslationTable;
  UINT64  *BlockEntry;
  UINT64  *SubTableBlockEntry;
  UINT64  BlockEntryAddress;
  UINTN   BaseAddressAlignment;
  UINTN   PageLevel;
  UINTN   Index;
  UINTN   IndexLevel;
  UINTN   T0SZ;
  UINT64  Attributes;
  UINT64  TableAttributes;

  // Initialize variable
  BlockEntry = NULL;

  // Ensure the parameters are valid
  if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the Region is aligned on 4KB boundary
  if ((RegionStart & (SIZE_4KB - 1)) != 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  // Ensure the required size is aligned on 4KB boundary and not 0
  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return NULL;
  }

  T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
  // Get the Table info from T0SZ
  GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);

  // If the start address is 0x0 then we use the size of the region to identify the alignment
  if (RegionStart == 0) {
    // Identify the highest possible alignment for the Region Size
    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
  } else {
    // Identify the highest possible alignment for the Base Address
    BaseAddressAlignment = LowBitSet64 (RegionStart);
  }

  // Identify the Page Level the RegionStart must belong to. Note that PageLevel
  // should be at least 1 since block translations are not supported at level 0
  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
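  // For example, a RegionStart that is 2 MB aligned gives BaseAddressAlignment == 21,
  // so PageLevel == MAX (3 - (21 - 12) / 9, 1) == 2, i.e. 2 MB block entries.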

  // If the required size is smaller than the current block size then we need to move to a finer page level.
  // The PageLevel was calculated on the Base Address alignment but did not take into account the alignment
  // of the allocation size
  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
    // It does not fit, so go one page level deeper (smaller blocks)
    PageLevel++;
  }

  //
  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
  //

  TranslationTable = RootTable;
  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);

    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
      // Go to the next table
      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);

      // If we are at the last level then update the last level to next level
      if (IndexLevel == PageLevel) {
        // Enter the next level
        PageLevel++;
      }
    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
      // If we are not at the last level then we need to split this BlockEntry
      if (IndexLevel != PageLevel) {
        // Retrieve the attributes from the block entry
        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;

        // Convert the block entry attributes into Table descriptor attributes
        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
        if (Attributes & TT_NS) {
          TableAttributes = TT_TABLE_NS;
        }

        // Get the address corresponding to this entry
        BlockEntryAddress = RegionStart;
        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
        // Shift back to the left to zero the bits below the block boundary
        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);

        // Set the correct entry type for the next page level
        if ((IndexLevel + 1) == 3) {
          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
        } else {
          Attributes |= TT_TYPE_BLOCK_ENTRY;
        }

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        // Populate the newly created lower level table
        SubTableBlockEntry = TranslationTable;
        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
          SubTableBlockEntry++;
        }
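
        // At this point the new table describes the same output address range
        // with the same attributes as the original block entry, only at the
        // next (finer) level, so swapping it in below is transparent to any
        // ongoing accesses to the region.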

        // Fill the BlockEntry with the new TranslationTable
        ReplaceLiveEntry (BlockEntry,
          ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
      }
    } else {
      if (IndexLevel != PageLevel) {
        //
        // Case when we have an Invalid Entry and we are at a page level above the one targeted.
        //

        // Create a new translation table
        TranslationTable = AllocatePages (1);
        if (TranslationTable == NULL) {
          return NULL;
        }

        ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));

        // Fill the new BlockEntry with the TranslationTable
        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
      }
    }
  }

  // Expose the found PageLevel to the caller
  *TableLevel = PageLevel;

  // Now we have the Table Level, we can get the Block Size associated to this table
  *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);

  // The last block entry of the root table depends on the number of entries in that table;
  // for any other table it is always the (TT_ENTRY_COUNT - 1)th entry.
  *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
      (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);

  return BlockEntry;
}

STATIC
RETURN_STATUS
UpdateRegionMapping (
  IN  UINT64  *RootTable,
  IN  UINT64  RegionStart,
  IN  UINT64  RegionLength,
  IN  UINT64  Attributes,
  IN  UINT64  BlockEntryMask
  )
{
  UINT32  Type;
  UINT64  *BlockEntry;
  UINT64  *LastBlockEntry;
  UINT64  BlockEntrySize;
  UINTN   TableLevel;

  // Ensure the Length is aligned on 4KB boundary
  if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
    ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
    return RETURN_INVALID_PARAMETER;
  }

  do {
    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
    // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
    BlockEntrySize = RegionLength;
    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
    if (BlockEntry == NULL) {
      // GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
      return RETURN_OUT_OF_RESOURCES;
    }

    if (TableLevel != 3) {
      Type = TT_TYPE_BLOCK_ENTRY;
    } else {
      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
    }
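
    // Note: levels 1 and 2 use block descriptors (bits[1:0] == 0b01), while at
    // level 3 the same bits must be 0b11 to denote a valid page descriptor,
    // hence the distinct TT_TYPE_BLOCK_ENTRY_LEVEL3 encoding.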

    do {
      // Fill the Block Entry with attribute and output block address
      *BlockEntry &= BlockEntryMask;
      *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

      // Go to the next BlockEntry
      RegionStart += BlockEntrySize;
      RegionLength -= BlockEntrySize;
      BlockEntry++;

      // Break the inner loop when next block is a table
      // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
      if (TableLevel != 3 &&
          (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
        break;
      }
    } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
  } while (RegionLength != 0);

  return RETURN_SUCCESS;
}

STATIC
RETURN_STATUS
FillTranslationTable (
  IN  UINT64                        *RootTable,
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryRegion
  )
{
  return UpdateRegionMapping (
           RootTable,
           MemoryRegion->VirtualBase,
           MemoryRegion->Length,
           ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
           0
           );
}

RETURN_STATUS
SetMemoryAttributes (
  IN EFI_PHYSICAL_ADDRESS   BaseAddress,
  IN UINT64                 Length,
  IN UINT64                 Attributes,
  IN EFI_PHYSICAL_ADDRESS   VirtualMask
  )
{
  RETURN_STATUS                 Status;
  ARM_MEMORY_REGION_DESCRIPTOR  MemoryRegion;
  UINT64                        *TranslationTable;

  MemoryRegion.PhysicalBase = BaseAddress;
  MemoryRegion.VirtualBase = BaseAddress;
  MemoryRegion.Length = Length;
  MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);

  TranslationTable = ArmGetTTBR0BaseAddress ();

  Status = FillTranslationTable (TranslationTable, &MemoryRegion);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

STATIC
RETURN_STATUS
SetMemoryRegionAttribute (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length,
  IN  UINT64                Attributes,
  IN  UINT64                BlockEntryMask
  )
{
  RETURN_STATUS  Status;
  UINT64         *RootTable;

  RootTable = ArmGetTTBR0BaseAddress ();

  Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
  if (RETURN_ERROR (Status)) {
    return Status;
  }

  // Invalidate all TLB entries so changes are synced
  ArmInvalidateTlb ();

  return RETURN_SUCCESS;
}

RETURN_STATUS
ArmSetMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  UINT64  Val;

  if (ArmReadCurrentEL () == AARCH64_EL1) {
    Val = TT_PXN_MASK | TT_UXN_MASK;
  } else {
    Val = TT_XN_MASK;
  }

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           Val,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionNoExec (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  UINT64  Mask;

  // XN maps to UXN in the EL1&0 translation regime
  Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);

  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           0,
           Mask);
}

RETURN_STATUS
ArmSetMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RO_RO,
           ~TT_ADDRESS_MASK_BLOCK_ENTRY);
}

RETURN_STATUS
ArmClearMemoryRegionReadOnly (
  IN  EFI_PHYSICAL_ADDRESS  BaseAddress,
  IN  UINT64                Length
  )
{
  return SetMemoryRegionAttribute (
           BaseAddress,
           Length,
           TT_AP_RW_RW,
           ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
}
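
//
// Illustrative usage sketch (hypothetical caller, not part of this library):
// a loader could enforce W^X on a freshly loaded image by combining the
// helpers above. ImageBase, CodeSize and DataSize are placeholder names.
//
//   Status = ArmSetMemoryRegionReadOnly (ImageBase, CodeSize);
//   if (!RETURN_ERROR (Status)) {
//     Status = ArmSetMemoryRegionNoExec (ImageBase + CodeSize, DataSize);
//   }
//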

RETURN_STATUS
EFIAPI
ArmConfigureMmu (
  IN  ARM_MEMORY_REGION_DESCRIPTOR  *MemoryTable,
  OUT VOID                          **TranslationTableBase OPTIONAL,
  OUT UINTN                         *TranslationTableSize OPTIONAL
  )
{
  VOID*          TranslationTable;
  VOID*          TranslationTableBuffer;
  UINT32         TranslationTableAttribute;
  UINT64         MaxAddress;
  UINTN          T0SZ;
  UINTN          RootTableEntryCount;
  UINTN          RootTableEntrySize;
  UINT64         TCR;
  RETURN_STATUS  Status;

  if (MemoryTable == NULL) {
    ASSERT (MemoryTable != NULL);
    return RETURN_INVALID_PARAMETER;
  }

  // Cover the entire GCD memory space
  MaxAddress = (1UL << PcdGet8 (PcdPrePiCpuMemorySize)) - 1;

  // Lookup the Table Level to get the information
  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
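  // For example, PcdPrePiCpuMemorySize == 36 gives MaxAddress == 0xFFFFFFFFF,
  // so T0SZ == 28 and the root is a 64-entry level 1 table.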

  //
  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
  //
  // Ideally we will be running at EL2, but should support EL1 as well.
  // UEFI should not run at EL3.
  if (ArmReadCurrentEL () == AARCH64_EL2) {
    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_PS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_PS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_PS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_PS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_PS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_PS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces larger than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;

    // Set the Physical Address Size using MaxAddress
    if (MaxAddress < SIZE_4GB) {
      TCR |= TCR_IPS_4GB;
    } else if (MaxAddress < SIZE_64GB) {
      TCR |= TCR_IPS_64GB;
    } else if (MaxAddress < SIZE_1TB) {
      TCR |= TCR_IPS_1TB;
    } else if (MaxAddress < SIZE_4TB) {
      TCR |= TCR_IPS_4TB;
    } else if (MaxAddress < SIZE_16TB) {
      TCR |= TCR_IPS_16TB;
    } else if (MaxAddress < SIZE_256TB) {
      TCR |= TCR_IPS_256TB;
    } else {
      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
      ASSERT (0); // Address spaces larger than 48 bits are not supported
      return RETURN_UNSUPPORTED;
    }
  } else {
    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
    return RETURN_UNSUPPORTED;
  }

  //
  // Translation table walks are always cache coherent on ARMv8-A, so cache
  // maintenance on page tables is never needed. Since there is a risk of
  // loss of coherency when using mismatched attributes, and given that memory
  // is mapped cacheable except for extraordinary cases (such as non-coherent
  // DMA), have the page table walker perform cached accesses as well, and
  // assert below that this matches the attributes we use for CPU accesses to
  // the region.
  //
  TCR |= TCR_SH_INNER_SHAREABLE |
         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
         TCR_RGN_INNER_WRITE_BACK_ALLOC;

  // Set TCR
  ArmSetTCR (TCR);

  // Allocate space for the root translation table. Pool allocations are 8 byte aligned,
  // but we may require a higher alignment based on the size of the root table.
  RootTableEntrySize = RootTableEntryCount * sizeof(UINT64);
  if (RootTableEntrySize < EFI_PAGE_SIZE / 2) {
    TranslationTableBuffer = AllocatePool (2 * RootTableEntrySize - 8);
    //
    // Naturally align the root table. Preserves possible NULL value
    //
    TranslationTable = (VOID *)((UINTN)(TranslationTableBuffer - 1) | (RootTableEntrySize - 1)) + 1;
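    // Example of the alignment arithmetic above: with a 64-entry root table
    // RootTableEntrySize == 512, so 1016 bytes are allocated and the expression
    // rounds the buffer up to the next 512-byte boundary (leaving it unchanged
    // if already aligned); the aligned table always fits within the allocation.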
  } else {
    TranslationTable = AllocatePages (1);
    TranslationTableBuffer = NULL;
  }
  if (TranslationTable == NULL) {
    return RETURN_OUT_OF_RESOURCES;
  }
  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
  // functions without needing to pass this value across the functions. The MMU is only enabled
  // after the translation tables are populated.
  ArmSetTTBR0 (TranslationTable);

  if (TranslationTableBase != NULL) {
    *TranslationTableBase = TranslationTable;
  }

  if (TranslationTableSize != NULL) {
    *TranslationTableSize = RootTableEntrySize;
  }

  ZeroMem (TranslationTable, RootTableEntrySize);

  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
  ArmDisableMmu ();
  ArmDisableDataCache ();
  ArmDisableInstructionCache ();

  // Make sure nothing sneaked into the cache
  ArmCleanInvalidateDataCache ();
  ArmInvalidateInstructionCache ();

  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
  while (MemoryTable->Length != 0) {

    DEBUG_CODE_BEGIN ();
      // Find the memory attribute for the Translation Table
      if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
          (UINTN)TranslationTable + RootTableEntrySize <= MemoryTable->PhysicalBase +
                                                          MemoryTable->Length) {
        TranslationTableAttribute = MemoryTable->Attributes;
      }
    DEBUG_CODE_END ();

    Status = FillTranslationTable (TranslationTable, MemoryTable);
    if (RETURN_ERROR (Status)) {
      goto FREE_TRANSLATION_TABLE;
    }
    MemoryTable++;
  }

  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);

  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB

  ArmDisableAlignmentCheck ();
  ArmEnableInstructionCache ();
  ArmEnableDataCache ();

  ArmEnableMmu ();
  return RETURN_SUCCESS;

FREE_TRANSLATION_TABLE:
  if (TranslationTableBuffer != NULL) {
    FreePool (TranslationTableBuffer);
  } else {
    FreePages (TranslationTable, 1);
  }
  return Status;
}

RETURN_STATUS
EFIAPI
ArmMmuBaseLibConstructor (
  VOID
  )
{
  extern UINT32 ArmReplaceLiveTranslationEntrySize;

  //
  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
  // with the MMU off so we have to ensure that it gets cleaned to the PoC
  //
  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
    ArmReplaceLiveTranslationEntrySize);

  return RETURN_SUCCESS;
}