/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include "gpt_rme_private.h"
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library"
#endif

/*
 * Lookup T from PPS
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};
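
/*
 * Worked example (illustrative): T is log2 of the protected physical
 * address space size, so PPS = 0b010 (1TB) selects gpt_t_lookup[2],
 * i.e. PPS_1TB_T == 40 and 2^40 bytes = 1TB, matching the table above.
 */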

/*
 * Lookup P from PGS
 *
 *   PGS   Size   P
 *   0b00  4KB    12
 *   0b10  16KB   14
 *   0b01  64KB   16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);
static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);
static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc);

/*
 * This structure contains GPT configuration data.
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;
	gpccr_pps_e pps;
	gpt_t_val_e t;
	gpccr_pgs_e pgs;
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;

/*
 * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
 * +-------+------------+
 * |  PGS  | L1 entries |
 * +-------+------------+
 * |  4KB  |     32     |
 * +-------+------------+
 * |  16KB |      8     |
 * +-------+------------+
 * |  64KB |      2     |
 * +-------+------------+
 */
static unsigned int gpt_l1_cnt_2mb;
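
/*
 * Derivation note (illustrative): each 64-bit L1 Granules descriptor
 * packs 16 four-bit GPI fields, so a 2MB span needs
 * 2MB / (16 * page size) entries. With PGS = 4KB that is
 * 0x200000 / (16U * 0x1000) = 32, matching the table above.
 */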

/*
 * Mask for the L1 index field, depending on
 * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
 * +---------+-------------------------------+
 * |         |             PGS               |
 * +---------+----------+----------+---------+
 * | L0GPTSZ |   4KB    |   16KB   |   64KB  |
 * +---------+----------+----------+---------+
 * |   1GB   |  0x3FFF  |  0xFFF   |  0x3FF  |
 * +---------+----------+----------+---------+
 * |  16GB   | 0x3FFFF  |  0xFFFF  | 0x3FFF  |
 * +---------+----------+----------+---------+
 * |  64GB   | 0xFFFFF  | 0x3FFFF  | 0xFFFF  |
 * +---------+----------+----------+---------+
 * |  512GB  | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
 * +---------+----------+----------+---------+
 */
static uint64_t gpt_l1_index_mask;

/* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
#define L1_QWORDS_2MB	(gpt_l1_cnt_2mb / 2U)
#define L1_QWORDS_32MB	(L1_QWORDS_2MB * 16U)
#define L1_QWORDS_512MB	(L1_QWORDS_32MB * 16U)

/* Size in bytes of L1 entries in 2MB, 32MB */
#define L1_BYTES_2MB	(gpt_l1_cnt_2mb * sizeof(uint64_t))
#define L1_BYTES_32MB	(L1_BYTES_2MB * 16U)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_INDEX(_pa) \
	(((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)
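
/*
 * Worked example (illustrative, assuming PGS = 4KB and L0GPTSZ = 1GB,
 * for which GPT_L1_IDX_SHIFT resolves to 16 and gpt_l1_index_mask to
 * 0x3FFF, consistent with the table above): each 64-bit L1 entry then
 * covers 16 x 4KB = 64KB, so the L1 index is PA bits [29:16], e.g.
 *	GPT_L1_INDEX(0x41230000) = (0x41230000 >> 16) & 0x3FFF = 0x123
 */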

/* This variable is used during initialization of the L1 tables */
static uintptr_t gpt_l1_tbl;

/* This variable is used during runtime */
#if (RME_GPT_BITLOCK_BLOCK == 0)
/*
 * The GPTs are protected by a global spinlock to ensure
 * that multiple CPUs do not attempt to change the descriptors at once.
 */
static spinlock_t gpt_lock;
#else
/* Bitlocks base address */
static bitlock_t *gpt_bitlock_base;
#endif

/* Lock/unlock macros for GPT entries */
#if (RME_GPT_BITLOCK_BLOCK == 0)
/*
 * Access to the GPT is controlled by a global lock to ensure
 * that no more than one CPU is allowed to make changes at any
 * given time.
 */
#define GPT_LOCK	spin_lock(&gpt_lock)
#define GPT_UNLOCK	spin_unlock(&gpt_lock)
#else
/*
 * Access to a block of memory is controlled by a bitlock.
 * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
 */
#define GPT_LOCK	bit_lock(gpi_info.lock, gpi_info.mask)
#define GPT_UNLOCK	bit_unlock(gpi_info.lock, gpi_info.mask)
#endif

static void tlbi_page_dsbosh(uintptr_t base)
{
	/*
	 * Look-up table for invalidating TLBs for 4KB, 16KB and 64KB pages.
	 * Entries are ordered by the GPCCR_EL3.PGS encoding: 0b00 (4KB),
	 * 0b01 (64KB), 0b10 (16KB).
	 */
	static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
		{ tlbirpalos_4k, ~(SZ_4K - 1UL) },
		{ tlbirpalos_64k, ~(SZ_64K - 1UL) },
		{ tlbirpalos_16k, ~(SZ_16K - 1UL) }
	};

	tlbi_page_lookup[gpt_config.pgs].function(
			base & tlbi_page_lookup[gpt_config.pgs].mask);
	dsbosh();
}

/*
 * Helper function to fill out GPI entries in a single L1 table
 * with Granules or Contiguous descriptors.
 *
 * Parameters
 *   l1		Pointer to the 2MB, 32MB or 512MB aligned L1 table entry to fill out
 *   l1_desc	GPT Granules or Contiguous descriptor to set this range to
 *   cnt	Number of 128-bit (double 64-bit) L1 entries to fill
 */
static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
{
	uint128_t *l1_quad = (uint128_t *)l1;
	uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);

	VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);

	for (unsigned int i = 0U; i < cnt; i++) {
		*l1_quad++ = l1_quad_desc;
	}
}

static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Convert the 2MB Contiguous block to Granules */
	fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
}

static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
	const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get the index corresponding to the 32MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_32MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 2MB blocks in 32MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		/* Fill with Granules or Contiguous descriptors */
		fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
			L1_QWORDS_2MB);
		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
	}
}

static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
			uint64_t l1_desc)
{
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
	const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
	uint64_t *l1;

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
		__func__, base, l1_desc);

	/* Get the index corresponding to the 512MB aligned address */
	idx = GPT_L1_INDEX(ALIGN_512MB(base));
	l1 = &gpi_info->gpt_l1_addr[idx];

	/* 16 x 32MB blocks in 512MB */
	for (unsigned int i = 0U; i < 16U; i++) {
		if (l1 == l1_32mb) {
			/* Shatter this 32MB block */
			shatter_32mb(base, gpi_info, l1_desc);
		} else {
			/* Fill 32MB with Contiguous descriptors */
			fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
		}

		l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
	}
}

/*
 * This function checks whether a GPI value is valid.
 *
 * These are the valid GPI values:
 *   GPT_GPI_NO_ACCESS	U(0x0)
 *   GPT_GPI_SECURE	U(0x8)
 *   GPT_GPI_NS		U(0x9)
 *   GPT_GPI_ROOT	U(0xA)
 *   GPT_GPI_REALM	U(0xB)
 *   GPT_GPI_ANY	U(0xF)
 *
 * Parameters
 *   gpi	GPI to check for validity.
 *
 * Return
 *   true for a valid GPI, false for an invalid one.
 */
static bool is_gpi_valid(unsigned int gpi)
{
	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
		return true;
	}
	return false;
}

/*
 * This function checks whether two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of the first PAS
 *   size_1: size of the first PAS
 *   base_2: base address of the second PAS
 *   size_2: size of the second PAS
 *
 * Return
 *   True if the PAS regions overlap, false if they do not.
 */
static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
			      uintptr_t base_2, size_t size_2)
{
	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
		return true;
	}
	return false;
}
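
/*
 * Example (illustrative): the half-open intervals [0x0000, 0x2000) and
 * [0x1000, 0x3000) satisfy both comparisons and therefore overlap,
 * while the adjacent intervals [0x0000, 0x1000) and [0x1000, 0x2000)
 * fail the first comparison and do not.
 */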

/*
 * This helper function checks whether any PAS region with index 0 to
 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx:      Index of the L0 entry to check.
 *   pas_regions: PAS region array.
 *   pas_idx:     Upper bound (exclusive) of the PAS array index.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool does_previous_pas_exist_here(unsigned int l0_idx,
					 pas_region_t *pas_regions,
					 unsigned int pas_idx)
{
	/* Iterate over PAS regions up to pas_idx */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
		    GPT_L0GPTSZ_ACTUAL_SIZE,
		    pas_regions[i].base_pa, pas_regions[i].size)) {
			return true;
		}
	}
	return false;
}

/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to an array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int validate_pas_mappings(pas_region_t *pas_regions,
				 unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in the region */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("GPT: PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables, we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i =
		     (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so it cannot be initialized
			 * again.
			 */
			ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			continue;
		}

		/* Check for granule mapping (L1) type */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("GPT: PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1UL) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same L0 region we subtract from pas_l1_cnt so that
			 * only the first PAS in the array gets to count it.
			 */

			/*
			 * If the L1 count is greater than 1 we know the start
			 * and end PAs are in different L0 regions, so we must
			 * check both for overlap against other PAS regions.
			 */
			if (pas_l1_cnt > 1) {
				if (does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1UL),
				    pas_regions, idx)) {
					pas_l1_cnt--;
				}
			}

			if (does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt--;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, the mapping type is invalid */
		ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}

/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   pps		PPS value to use for table generation.
 *   l0_mem_base	Base address of memory used for L0 tables.
 *   l0_mem_size	Size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
			      size_t l0_mem_size)
{
	size_t l0_alignment, locks_size = 0;

	/*
	 * Make sure PPS is valid and then store it since macros need this value
	 * to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("GPT: Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or the L0 table size */
	l0_alignment = PAGE_SIZE_4KB;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check the base address */
	if ((l0_mem_base == 0UL) ||
	    ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
		ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/*
	 * Size of bitlocks in bytes for the protected address space,
	 * with one lock bit per RME_GPT_BITLOCK_BLOCK * 512MB block.
	 */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
		     (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);

	/*
	 * If the protected space size is less than the size covered
	 * by a 'bitlock' structure, check for a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	}
#endif
	/* Check the size for L0 tables and bitlocks */
	if (l0_mem_size < (GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size)) {
		ERROR("GPT: Inadequate L0 memory\n");
		ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size,
		      l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}

/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
			      unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
		ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Get the total memory needed for L1 tables */
	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Check for overflow */
	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("GPT: Overflow calculating L1 memory size\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied */
	if (l1_mem_size < l1_gpt_mem_sz) {
		ERROR("GPT: Inadequate L1 GPTs memory\n");
		ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
		      l1_gpt_mem_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
	return 0;
}

/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region to
 *		initialize.
 */
static void generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned long idx, end_idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */

	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine the number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}

/*
 * Helper function to determine whether the end physical address lies in the
 * same L0 region as the current physical address. If so, the end physical
 * address is returned; otherwise, the start address of the next L0 region is
 * returned.
 *
 * Parameters
 *   cur_pa	Physical address of the current PA in the loop through
 *		the range.
 *   end_pa	Physical address of the end PA in a PAS range.
 *
 * Return
 *   The PA of the end of the current range.
 */
static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t cur_idx;
	uintptr_t end_idx;

	cur_idx = GPT_L0_IDX(cur_pa);
	end_idx = GPT_L0_IDX(end_pa);

	assert(cur_idx <= end_idx);

	if (cur_idx == end_idx) {
		return end_pa;
	}

	return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
}
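
/*
 * Worked example (illustrative, assuming L0GPTSZ = 1GB so that
 * GPT_L0_IDX_SHIFT is 30): for cur_pa = 0x20000000 and
 * end_pa = 0x100000000 the L0 indices differ (0 vs 4), so the
 * function returns (0 + 1) << 30 = 0x40000000, the base of the
 * next L0 region.
 */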

/*
 * Helper function to fill out GPI entries from the 'first' granule address of
 * the specified 'length' in a single L1 table with Contiguous descriptors of
 * the given 'gpi'.
 *
 * Parameters
 *   l1		Pointer to the L1 table to fill out
 *   first	Address of the first granule in the range
 *   length	Length of the range in bytes
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of the next granule in the range.
 */
__unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
					    size_t length, unsigned int gpi)
{
	/*
	 * Look-up table for contiguous blocks and descriptors.
	 * Entries should be defined in descending block sizes:
	 * 512MB, 32MB and 2MB.
	 */
	static const gpt_fill_lookup_t gpt_fill_lookup[] = {
#if (RME_GPT_MAX_BLOCK == 512)
		{ SZ_512M, GPT_L1_CONT_DESC_512MB },
#endif
#if (RME_GPT_MAX_BLOCK >= 32)
		{ SZ_32M, GPT_L1_CONT_DESC_32MB },
#endif
#if (RME_GPT_MAX_BLOCK != 0)
		{ SZ_2M, GPT_L1_CONT_DESC_2MB }
#endif
	};

	/*
	 * Iterate through all block sizes (512MB, 32MB and 2MB),
	 * starting with the maximum supported.
	 */
	for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
		/* Calculate the index */
		unsigned long idx = GPT_L1_INDEX(first);

		/* Contiguous block size */
		size_t cont_size = gpt_fill_lookup[i].size;

		if (GPT_REGION_IS_CONT(length, first, cont_size)) {

			/* Generate a Contiguous descriptor */
			uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
						gpt_fill_lookup[i].desc);

			/* Number of 128-bit L1 entries in the block */
			unsigned int cnt;

			switch (cont_size) {
			case SZ_512M:
				cnt = L1_QWORDS_512MB;
				break;
			case SZ_32M:
				cnt = L1_QWORDS_32MB;
				break;
			default:	/* SZ_2M */
				cnt = L1_QWORDS_2MB;
				break;
			}

			VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
				first, cont_size / SZ_1M);

			/* Fill Contiguous descriptors */
			fill_desc(&l1[idx], l1_desc, cnt);
			first += cont_size;
			length -= cont_size;

			if (length == 0UL) {
				break;
			}
		}
	}

	return first;
}

/* Build a Granules descriptor with the same 'gpi' for every GPI entry */
static uint64_t build_l1_desc(unsigned int gpi)
{
	uint64_t l1_desc = (uint64_t)gpi | ((uint64_t)gpi << 4);

	l1_desc |= (l1_desc << 8);
	l1_desc |= (l1_desc << 16);
	return (l1_desc | (l1_desc << 32));
}
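
/*
 * Worked example (illustrative): build_l1_desc(GPT_GPI_NS) replicates
 * GPI 0x9 into all 16 four-bit fields, producing 0x9999999999999999.
 */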

/*
 * Helper function to fill out GPI entries from the 'first' to the 'last'
 * granule address in a single L1 table with a Granules descriptor built
 * from 'gpi'.
 *
 * Parameters
 *   l1		Pointer to the L1 table to fill out
 *   first	Address of the first granule in the range
 *   last	Address of the last granule in the range (inclusive)
 *   gpi	GPI to set this range to
 *
 * Return
 *   Address of the next granule in the range.
 */
static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
				   uintptr_t last, unsigned int gpi)
{
	uint64_t gpi_mask;
	unsigned long i;

	/* Generate the Granules descriptor */
	uint64_t l1_desc = build_l1_desc(gpi);

	/* Shift the mask if we're starting in the middle of an L1 entry */
	gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region */
	for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {

		/* Account for stopping in the middle of an L1 entry */
		if (i == GPT_L1_INDEX(last)) {
			gpi_mask &= (gpi_mask >> ((15U -
				GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));

		/* Write the GPI values */
		l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);

		/* Reset the mask */
		gpi_mask = ULONG_MAX;
	}

	return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
}
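
/*
 * Masking example (illustrative): if 'first' starts at GPI index 4 of
 * an L1 entry, the initial mask is ULONG_MAX << (4 << 2), i.e.
 * 0xFFFFFFFFFFFF0000, so the low four GPI fields of that entry are
 * preserved. If 'last' ends at GPI index 11, the final mask is ANDed
 * with itself shifted right by (15 - 11) << 2 = 16 bits, giving
 * 0x0000FFFFFFFFFFFF and preserving that entry's top four GPI fields.
 */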

/*
 * Helper function to fill out GPI entries in a single L1 table.
 * This function fills out an entire L1 table with either Granules or
 * Contiguous (RME_GPT_MAX_BLOCK != 0) descriptors depending on region
 * length and alignment.
 * Note: if RME_GPT_MAX_BLOCK == 0, the L1 tables are filled with regular
 * Granules descriptors only.
 *
 * Parameters
 *   l1		Pointer to the L1 table to fill out
 *   first	Address of the first granule in the range
 *   last	Address of the last granule in the range (inclusive)
 *   gpi	GPI to set this range to
 */
static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
			unsigned int gpi)
{
	assert(l1 != NULL);
	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));

#if (RME_GPT_MAX_BLOCK != 0)
	while (first <= last) {
		/* Region length */
		size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		if (length < SZ_2M) {
			/*
			 * Fill with Granules descriptors when the region
			 * length is < 2MB.
			 */
			first = fill_l1_gran_desc(l1, first, last, gpi);

		} else if ((first & (SZ_2M - UL(1))) == UL(0)) {
			/*
			 * For a region that is >= 2MB and at least 2MB
			 * aligned, the call to fill_l1_cont_desc will iterate
			 * through all supported block sizes (512MB, 32MB and
			 * 2MB) and fill the corresponding Contiguous
			 * descriptors.
			 */
			first = fill_l1_cont_desc(l1, first, length, gpi);
		} else {
			/*
			 * For an unaligned region >= 2MB, fill with Granules
			 * descriptors up to the next 2MB aligned address.
			 */
			uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
					     GPT_PGS_ACTUAL_SIZE(gpt_config.p);

			first = fill_l1_gran_desc(l1, first, new_last, gpi);
		}
	}
#else
	/* Fill with Granules descriptors */
	first = fill_l1_gran_desc(l1, first, last, gpi);
#endif
	assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
}

/*
 * This function finds the next available unused L1 table and initializes all
 * Granules descriptor entries to GPI_ANY. This ensures that there are no
 * chunks of GPI_NO_ACCESS (0b0000) memory floating around in the system in
 * the event that a PAS region stops midway through an L1 table, thus
 * guaranteeing that all memory not explicitly assigned is GPI_ANY. This
 * function does not check for overflow conditions; that should be done by
 * the caller.
 *
 * Return
 *   Pointer to the next available L1 table.
 */
static uint64_t *get_new_l1_tbl(void)
{
	/* Retrieve the next L1 table */
	uint64_t *l1 = (uint64_t *)gpt_l1_tbl;

	/* Increment the L1 GPT address */
	gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Initialize all GPIs to GPT_GPI_ANY */
	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
		l1[i] = GPT_L1_ANY_DESC;
	}

	return l1;
}

/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas	Pointer to the structure defining the PAS region.
 */
static void generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx, gpi;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at the base PA */
	cur_pa = pas->base_pa;

	/* Get the GPI */
	gpi = GPT_PAS_ATTR_GPI(pas->attrs);

	/* Iterate over each L0 region in this memory range */
	for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
	     l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space */
			l1_gpt_arr = get_new_l1_tbl();

			/* Fill out the L0 descriptor */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
			l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up the L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);

		/* Advance cur_pa to the first granule in the next L0 region */
		cur_pa = get_l1_end_pa(cur_pa, end_pa);
	}
}

/*
 * This function flushes a range of L0 descriptors used by a given PAS region
 * array. There is a chance that some unmodified L0 descriptors will be flushed
 * in the case that there are "holes" in an array of PAS regions, but overall
 * this should be faster than individually flushing each modified L0 descriptor
 * as it is created.
 *
 * Parameters
 *   *pas	Pointer to an array of PAS regions.
 *   pas_count	Number of entries in the PAS array.
 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	unsigned long idx;
	unsigned long start_idx;
	unsigned long end_idx;
	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas != NULL);
	assert(pas_count != 0U);

	/* Initial start and end values */
	start_idx = GPT_L0_IDX(pas[0].base_pa);
	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);

	/* Find the lowest and highest L0 indices used in this PAS array */
	for (idx = 1UL; idx < pas_count; idx++) {
		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
			start_idx = GPT_L0_IDX(pas[idx].base_pa);
		}
		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
		}
	}

	/*
	 * Flush all covered L0 descriptors; add 1 because we need to include
	 * the end index value.
	 */
	flush_dcache_range((uintptr_t)&l0[start_idx],
			   ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
}

/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialized before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0UL) {
		ERROR("GPT: Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs.
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for fault control. */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to disable granule protection checks.
 */
void gpt_disable(void)
{
	u_register_t gpccr_el3 = read_gpccr_el3();

	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
	dsbsy();
	isb();
}

/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation.
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	uint64_t gpt_desc;
	size_t locks_size = 0;
	__unused bitlock_t *bit_locks;
	int ret;

	/* Ensure that the MMU and data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Validate the parameters */
	ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Initialize bitlocks at the end of the L0 table */
	bit_locks = (bitlock_t *)(l0_mem_base +
				  GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Size of bitlocks in bytes */
	locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
		     (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);

	/*
	 * If the protected space size is less than the size covered
	 * by a 'bitlock' structure, initialize a single bitlock.
	 */
	if (locks_size < LOCK_SIZE) {
		locks_size = LOCK_SIZE;
	}

	for (size_t i = 0UL; i < (locks_size / LOCK_SIZE); i++) {
		bit_locks[i].lock = 0U;
	}
#endif

	/* Flush the updated L0 tables and bitlocks to memory */
	flush_dcache_range((uintptr_t)l0_mem_base,
			   GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size);

	/* Stash the L0 base address once initial setup is complete */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}

/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory (e.g. with multiple DDR banks, placing the L1 tables in the DDR
 * bank that they control).
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to the PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
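/*
 * Illustrative call sequence, a sketch only: addresses, sizes and the
 * GPT_MAP_REGION_* helper macros are assumptions to be checked against
 * gpt_rme.h and the platform memory map.
 *
 *	pas_region_t pas[] = {
 *		GPT_MAP_REGION_BLOCK(0x0UL, SZ_1G, GPT_GPI_ANY),
 *		GPT_MAP_REGION_GRANULE(0x80000000UL, SZ_32M, GPT_GPI_REALM)
 *	};
 *
 *	(void)gpt_init_l0_tables(pps, l0_base, l0_size);
 *	(void)gpt_init_pas_l1_tables(pgs, l1_base, l1_size,
 *				     pas, ARRAY_SIZE(pas));
 *	(void)gpt_enable();
 */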
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int l1_gpt_cnt, ret;

	/* Ensure that the MMU and data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* PGS is needed for validate_pas_mappings so check it now */
	if (pgs > GPT_PGS_MAX) {
		ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure the L0 tables have been initialized */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("GPT: L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many */
	l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters */
	if (l1_gpt_cnt > 0) {
		ret = validate_l1_params(l1_mem_base, l1_mem_size,
					 (unsigned int)l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation */
		gpt_l1_tbl = l1_mem_base;
	}

	/* Number of L1 entries in 2MB depends on the GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

	INFO("GPT: Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: %u\n", pas_count);
	INFO("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
			idx, pas_regions[idx].base_pa, pas_regions[idx].size,
			GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
			GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			generate_l0_blk_desc(&pas_regions[idx]);

		} else {
			generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush the modified L0 tables */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush the L1 tables if needed */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   (size_t)l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to memory */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service, so this function detects the
 * initialization from a previous stage. Granule protection checks must already
 * be enabled or this function will return an error.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(void)
{
	u_register_t reg;

	/* Ensure that the MMU and data caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Ensure GPCs are already enabled */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
		ERROR("GPT: Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR; we don't need the L1 base
	 * addresses since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get the PGS and PPS values */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	/* Number of L1 entries in 2MB depends on the GPCCR_EL3.PGS value */
	gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);

	/* Mask for the L1 index field */
	gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Bitlocks at the end of the L0 table */
	gpt_bitlock_base = (bitlock_t *)(gpt_config.plat_gpt_l0_base +
					 GPT_L0_TABLE_SIZE(gpt_config.t));
#endif
	VERBOSE("GPT: Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
#if (RME_GPT_BITLOCK_BLOCK != 0)
	VERBOSE("  Bitlocks:  0x%"PRIxPTR"\n", (uintptr_t)gpt_bitlock_base);
#endif
	return 0;
}

/*
 * A helper to write the GPI field (target_pas << gpi_shift) into the L1
 * descriptor at index 'idx' of the 'gpt_l1_addr' table.
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
			     unsigned int gpi_shift, unsigned int idx,
			     unsigned int target_pas)
{
	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;

	dsboshst();
}
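
/*
 * Worked example (illustrative): for *gpt_l1_desc = 0x9999999999999999
 * (all NS), gpi_shift = 8 and target_pas = GPT_GPI_REALM (0xB), the
 * descriptor becomes 0x9999999999999B99 before being stored at 'idx'.
 */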

/*
 * Helper to compute the gpt_l1_* details for the given base address and
 * return them in 'gpi_info'.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;
	__unused unsigned int block_idx;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from the PA */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

#if (RME_GPT_BITLOCK_BLOCK != 0)
	/* Block index */
	block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));

	/* Bitlock address and mask */
	gpi_info->lock = &gpt_bitlock_base[block_idx / LOCK_BITS];
	gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
#endif
	return 0;
}
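
/*
 * Shift example (illustrative): GPT_L1_GPI_IDX() yields the granule's
 * GPI position within its 64-bit L1 descriptor; for position 3 the
 * shift is 3 << 2 = 12, so the GPI occupies descriptor bits [15:12].
 */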

/*
 * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
 * This function is called with the bitlock or spinlock acquired.
 */
static void read_gpi(gpi_info_t *gpi_info)
{
	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];

	if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		/* Read the GPI from the Contiguous descriptor */
		gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
	} else {
		/* Read the GPI from the Granules descriptor */
		gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
			GPT_L1_GRAN_DESC_GPI_MASK);
	}
}

static void flush_page_to_popa(uintptr_t addr)
{
	size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);

	if (is_feat_mte2_supported()) {
		flush_dcache_to_popa_range_mte2(addr, size);
	} else {
		flush_dcache_to_popa_range(addr, size);
	}
}

/*
 * Helper function to check whether all L1 entries in a 2MB block have
 * the same Granules descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to the 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
				    uint64_t l1_desc)
{
	/* Index of the last L1 entry in the 2MB block */
	unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
			    gpt_l1_cnt_2mb - 1UL;

	/* Number of L1 entries in the 2MB block */
	unsigned int cnt = gpt_l1_cnt_2mb;

	/*
	 * Start the check from the last L1 entry and continue until the first
	 * entry that does not match the passed Granules descriptor value is
	 * found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
	}

	return true;
}

__unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
			      uint64_t l1_desc)
{
	/* L1 entry index of the start of the 2MB block */
	unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
}

/*
 * Helper function to check whether the first L1 entries of all 2MB blocks
 * in a 32MB region have the same 2MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to the 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all such L1 entries have the same descriptor value, false
 *   otherwise.
 */
__unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
				     uint64_t l1_desc)
{
	/* The first L1 entry index of the last 2MB block in 32MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
			    (15UL * gpt_l1_cnt_2mb);

	/* 2MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);

	/* Number of 2MB blocks in 32MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to the 2MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;

	/*
	 * Start the check from the first L1 entry of the last 2MB block and
	 * continue until the first entry that does not match the 2MB
	 * Contiguous descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
			       uint64_t l1_desc)
{
	/* L1 entry index of the start of the 32MB block */
	unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
}

/*
 * Helper function to check whether the first L1 entries of all 32MB blocks
 * in a 512MB region have the same 32MB Contiguous descriptor value.
 *
 * Parameters
 *   base	Base address of the region to be checked
 *   gpi_info	Pointer to the 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor.
 *
 * Return
 *   true if all such L1 entries have the same descriptor value, false
 *   otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The first L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
			    (15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to the 32MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start the check from the first L1 entry of the last 32MB block and
	 * continue until the first entry that does not match the 32MB
	 * Contiguous descriptor value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}

__unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* L1 entry index of the start of the 512MB block */
	unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));

	/* 512MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
}

/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptors.
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to the 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor with all entries
 *		set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with the check for a 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* The check for 2MB fusing failed */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for a 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* The check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for a 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* The check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);

#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}

/*
 * Helper function to convert GPI entries in a single L1 table
 * from a Contiguous to Granules descriptors. As a result of the
 * shattering, this function updates the descriptor in the passed
 * 'gpi_info_t' structure to a Granules descriptor.
 *
 * Parameters
 *   base	Base address of the region to be written
 *   gpi_info	Pointer to the 'gpi_info_t' structure
 *   l1_desc	GPT Granules descriptor to set this range to.
 */
__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
				   uint64_t l1_desc)
{
	/* Look-up table for shattering 2MB, 32MB and 512MB blocks */
	static const gpt_shatter_func gpt_shatter_lookup[] = {
		shatter_2mb,
		shatter_32mb,
		shatter_512mb
	};

	/* Look-up table for invalidating TLBs for 2MB, 32MB and 512MB blocks */
	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
	};

	/* Get the shattering level from the Contig field of the Contiguous descriptor */
	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;

	/* Shatter the contiguous block */
	gpt_shatter_lookup[level](base, gpi_info, l1_desc);

	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
	dsbosh();

	/*
	 * Update the descriptor in the 'gpi_info_t' structure to Granules to
	 * reflect the shattered GPI back to the caller.
	 */
	gpi_info->gpt_l1_desc = l1_desc;
}

/*
 * This function is the granule transition delegate service. When a granule
 * transition request occurs, it is routed to this function to have the
 * request, if valid, fulfilled following section A1.1.1 Delegate of the RME
 * supplement.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of the region to transition, must be aligned to
 *			granule size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	unsigned int target_pas;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Only a single granule transition is supported at a time */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that the requested region does not overflow the address space */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are granule-aligned and within the PPS */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}
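
	/*
	 * Example of the checks above: with a 4KB granule configuration
	 * (GPT_PGS_ACTUAL_SIZE() returning 0x1000), a request is accepted
	 * only when 'size' is exactly 0x1000, 'base' is 4KB-aligned and
	 * 'base + size' still lies within the protected physical space.
	 */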

	/* Delegate request can only come from REALM or SECURE */
	if ((src_sec_state != SMC_FROM_REALM) &&
	    (src_sec_state != SMC_FROM_SECURE)) {
		VERBOSE("GPT: Invalid caller security state 0x%x\n",
			src_sec_state);
		return -EINVAL;
	}

	if (src_sec_state == SMC_FROM_REALM) {
		target_pas = GPT_GPI_REALM;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_REALM_DESC;
	} else {
		target_pas = GPT_GPI_SECURE;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_SECURE_DESC;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in NS state */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("GPT: Only a granule in NS state can be delegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space.
	 * Issue DC CIPAPA, or DC CIGDPAPA on implementations with FEAT_MTE2.
	 */
	flush_page_to_popa(base | nse);

	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);

	/* Ensure that all agents observe the new configuration */
	tlbi_page_dsbosh(base);

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == l1_desc) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, l1_desc);
	}
#endif

	/* Release the GPT lock */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}
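
/*
 * A minimal usage sketch, not part of this library: a hypothetical Granule
 * Transition Service SMC handler delegating one granule to the Realm PAS.
 * The handler name is illustrative only; the gpt_delegate_pas() call and
 * its arguments follow the service defined above.
 */
#if 0
static int example_gtsi_delegate(uint64_t addr)
{
	/* 'size' must be exactly one granule of the configured PGS */
	return gpt_delegate_pas(addr, GPT_PGS_ACTUAL_SIZE(gpt_config.p),
				SMC_FROM_REALM);
}
#endif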

/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request
 * is validated then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to
 *			granule size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Only a single granule transition is supported at a time */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that the requested region does not overflow the address space */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are granule-aligned and within the PPS */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}

	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in the delegated state */
	if ((src_sec_state == SMC_FROM_REALM) &&
	    (gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		   (gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only a granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now so that writes to the currently
	 * accessible physical address space cannot later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data speculatively loaded into NS space before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear existing GPI encoding and transition granule */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Release the GPT lock */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}
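
/*
 * A minimal round-trip sketch, not part of this library: one granule
 * delegated to the Secure PAS and handed back to NS. The helper name is
 * hypothetical; both services return 0 on success or a negative Linux
 * error code, which is propagated here.
 */
#if 0
static int example_secure_round_trip(uint64_t addr)
{
	size_t granule = GPT_PGS_ACTUAL_SIZE(gpt_config.p);
	int ret = gpt_delegate_pas(addr, granule, SMC_FROM_SECURE);

	if (ret != 0) {
		return ret;
	}
	return gpt_undelegate_pas(addr, granule, SMC_FROM_SECURE);
}
#endif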