/*
 * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef CPU_DATA_H
#define CPU_DATA_H

#include <platform_def.h>	/* CACHE_WRITEBACK_GRANULE required */

#include <bl31/ehf.h>

/* Size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE		12

#ifdef __aarch64__

/* 8-byte-aligned size of psci_cpu_data structure */
#define PSCI_CPU_DATA_SIZE_ALIGNED	((PSCI_CPU_DATA_SIZE + 7) & ~7)
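
/*
 * Worked example (illustrative, follows directly from the definition
 * above): the (x + 7) & ~7 idiom rounds x up to the next multiple of 8,
 * so with PSCI_CPU_DATA_SIZE == 12:
 *   (12 + 7) & ~7 == 19 & ~7 == 16
 */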

#if ENABLE_RME
/* Size of cpu_context array */
#define CPU_DATA_CONTEXT_NUM		3
/* Offset of cpu_ops_ptr, size 8 bytes */
#define CPU_DATA_CPU_OPS_PTR		0x18
#else /* ENABLE_RME */
#define CPU_DATA_CONTEXT_NUM		2
#define CPU_DATA_CPU_OPS_PTR		0x10
#endif /* ENABLE_RME */
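
/*
 * The cpu_ops_ptr offsets above follow from cpu_ops_ptr sitting right
 * after the cpu_context array of 8-byte pointers in cpu_data_t below:
 * 3 * 8 == 0x18 with RME, 2 * 8 == 0x10 without it.
 */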

#if ENABLE_PAUTH
/* 8-byte-aligned offset of apiakey[2], size 16 bytes */
#define CPU_DATA_APIAKEY_OFFSET		(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#define CPU_DATA_CRASH_BUF_OFFSET	(0x10 + CPU_DATA_APIAKEY_OFFSET)
#else /* ENABLE_PAUTH */
#define CPU_DATA_CRASH_BUF_OFFSET	(0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \
					     + CPU_DATA_CPU_OPS_PTR)
#endif /* ENABLE_PAUTH */
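
/*
 * Worked example, assuming ENABLE_RME=0 and ENABLE_PAUTH=1 (the 0x8 term
 * is the size of cpu_ops_ptr, 0x10 the size of apiakey[2]):
 *   CPU_DATA_APIAKEY_OFFSET   = 0x8 + 0x10 + 0x10 = 0x28
 *   CPU_DATA_CRASH_BUF_OFFSET = 0x10 + 0x28 = 0x38
 */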

/* The crash buffer must have room to save 8 registers of 8 bytes each */
#define CPU_DATA_CRASH_BUF_SIZE		64

#else	/* !__aarch64__ */

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR		0x0
#define CPU_DATA_CRASH_BUF_OFFSET	(0x4 + PSCI_CPU_DATA_SIZE)

#endif	/* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END		(CPU_DATA_CRASH_BUF_OFFSET + \
						CPU_DATA_CRASH_BUF_SIZE)
#else
#define CPU_DATA_CRASH_BUF_END		CPU_DATA_CRASH_BUF_OFFSET
#endif

/* cpu_data size is the data size rounded up to the platform cache line size */
#define CPU_DATA_SIZE			(((CPU_DATA_CRASH_BUF_END + \
					CACHE_WRITEBACK_GRANULE - 1) / \
						CACHE_WRITEBACK_GRANULE) * \
							CACHE_WRITEBACK_GRANULE)
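
/*
 * Continuing the worked example above with an assumed 64-byte
 * CACHE_WRITEBACK_GRANULE (the real value comes from platform_def.h):
 *   CPU_DATA_CRASH_BUF_END = 0x38 + 64 = 120
 *   CPU_DATA_SIZE          = ((120 + 63) / 64) * 64 = 128
 */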

#if ENABLE_RUNTIME_INSTRUMENTATION
/* Temporary space to store PMF timestamps from assembly code */
#define CPU_DATA_PMF_TS_COUNT		1
#define CPU_DATA_PMF_TS0_OFFSET		CPU_DATA_CRASH_BUF_END
#define CPU_DATA_PMF_TS0_IDX		0
#endif

#ifndef __ASSEMBLER__

#include <assert.h>
#include <stdint.h>

#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/psci/psci.h>

#include <platform_def.h>

/* Offsets for the cpu_data structure */
#define CPU_DATA_PSCI_LOCK_OFFSET	__builtin_offsetof\
		(cpu_data_t, psci_svc_cpu_data.pcpu_bakery_info)

#if PLAT_PCPU_DATA_SIZE
#define CPU_DATA_PLAT_PCPU_OFFSET	__builtin_offsetof\
		(cpu_data_t, platform_cpu_data)
#endif

typedef enum context_pas {
	CPU_CONTEXT_SECURE = 0,
	CPU_CONTEXT_NS,
#if ENABLE_RME
	CPU_CONTEXT_REALM,
#endif
	CPU_CONTEXT_NUM
} context_pas_t;

/*******************************************************************************
 * Function & variable prototypes
 ******************************************************************************/

/*******************************************************************************
 * Cache of frequently used per-cpu data:
 *   Pointers to non-secure, realm, and secure security state contexts
 *   Address of the crash stack
 * It is aligned to the cache line boundary to allow efficient concurrent
 * manipulation of these pointers on different cpus.
 *
 * The data structure and the _cpu_data accessors should not be used directly
 * by components that have per-cpu members; use the member access macros
 * instead.
 ******************************************************************************/
typedef struct cpu_data {
#ifdef __aarch64__
	void *cpu_context[CPU_DATA_CONTEXT_NUM];
#endif /* __aarch64__ */
	uintptr_t cpu_ops_ptr;
	struct psci_cpu_data psci_svc_cpu_data;
#if ENABLE_PAUTH
	uint64_t apiakey[2];
#endif
#if CRASH_REPORTING
	u_register_t crash_buf[CPU_DATA_CRASH_BUF_SIZE >> 3];
#endif
#if ENABLE_RUNTIME_INSTRUMENTATION
	uint64_t cpu_data_pmf_ts[CPU_DATA_PMF_TS_COUNT];
#endif
#if PLAT_PCPU_DATA_SIZE
	uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
	pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;

extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT];

#ifdef __aarch64__
CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM,
		assert_cpu_data_context_num_mismatch);
#endif

#if ENABLE_PAUTH
CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof
	(cpu_data_t, apiakey),
	assert_cpu_data_pauth_stack_offset_mismatch);
#endif

#if CRASH_REPORTING
/* verify assembler offsets match data structures */
CASSERT(CPU_DATA_CRASH_BUF_OFFSET == __builtin_offsetof
	(cpu_data_t, crash_buf),
	assert_cpu_data_crash_stack_offset_mismatch);
#endif

CASSERT(CPU_DATA_SIZE == sizeof(cpu_data_t),
		assert_cpu_data_size_mismatch);

CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
		(cpu_data_t, cpu_ops_ptr),
		assert_cpu_data_cpu_ops_ptr_offset_mismatch);

#if ENABLE_RUNTIME_INSTRUMENTATION
CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof
		(cpu_data_t, cpu_data_pmf_ts[0]),
		assert_cpu_data_pmf_ts0_offset_mismatch);
#endif

struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);

#ifdef __aarch64__
/* Return the cpu_data structure for the current CPU. */
static inline struct cpu_data *_cpu_data(void)
{
	return (cpu_data_t *)read_tpidr_el3();
}
#else
struct cpu_data *_cpu_data(void);
#endif
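
/*
 * Sketch of the setup _cpu_data() relies on (the actual initialisation is
 * done by the cpu_data helpers, not in this header): on AArch64 each CPU
 * must have tpidr_el3 pointing at its own percpu_data slot before
 * _cpu_data() is first used, e.g.:
 *
 *   write_tpidr_el3((u_register_t)&percpu_data[plat_my_core_pos()]);
 */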

/*
 * Returns the index of the cpu_context array for the given security state.
 * All accesses to cpu_context should be through this helper to make sure
 * an access is not out-of-bounds. The function assumes security_state is
 * valid.
 */
static inline context_pas_t get_cpu_context_index(uint32_t security_state)
{
	if (security_state == SECURE) {
		return CPU_CONTEXT_SECURE;
	} else {
#if ENABLE_RME
		if (security_state == NON_SECURE) {
			return CPU_CONTEXT_NS;
		} else {
			assert(security_state == REALM);
			return CPU_CONTEXT_REALM;
		}
#else
		assert(security_state == NON_SECURE);
		return CPU_CONTEXT_NS;
#endif
	}
}
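
/*
 * Illustrative usage (example statement only; SECURE/NON_SECURE are the
 * standard TF-A security state constants):
 *
 *   void *ctx = _cpu_data()->cpu_context[get_cpu_context_index(NON_SECURE)];
 */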

/**************************************************************************
 * APIs for initialising and accessing per-cpu data
 *************************************************************************/

void init_cpu_data_ptr(void);
void init_cpu_ops(void);

#define get_cpu_data(_m)		   _cpu_data()->_m
#define set_cpu_data(_m, _v)		   _cpu_data()->_m = (_v)
#define get_cpu_data_by_index(_ix, _m)	   _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = (_v)
/* ((cpu_data_t *)0)->_m is a dummy used to get the size of struct member _m */
#define flush_cpu_data(_m)	   flush_dcache_range((uintptr_t)	  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define inv_cpu_data(_m)	   inv_dcache_range((uintptr_t)		  \
						&(_cpu_data()->_m), \
						sizeof(((cpu_data_t *)0)->_m))
#define flush_cpu_data_by_index(_ix, _m)	\
				   flush_dcache_range((uintptr_t)	  \
					 &(_cpu_data_by_index(_ix)->_m),  \
						sizeof(((cpu_data_t *)0)->_m))
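
/*
 * Illustrative usage of the accessor macros (the member names are real
 * fields of cpu_data_t; the statements themselves are examples only):
 *
 *   set_cpu_data(cpu_ops_ptr, ops_addr);
 *   uintptr_t ops = get_cpu_data_by_index(1U, cpu_ops_ptr);
 *   flush_cpu_data(psci_svc_cpu_data);
 */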

#endif /* __ASSEMBLER__ */
#endif /* CPU_DATA_H */