1 /*
2 * Copyright (c) 2020 Google, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 /*
25 * This module registers smc handlers that are called by tests running in the
26 * client os. This api is currently only available if lib/sm is enabled.
27 */
28 #if WITH_LIB_SM
29
30 #define LOCAL_TRACE 0
31
32 #include <arch/arch_ops.h>
33 #include <arch/ops.h>
34 #include <err.h>
35 #include <inttypes.h>
36 #include <kernel/thread.h>
37 #include <kernel/timer.h>
38 #include <kernel/vm.h>
39 #include <lib/sm.h>
40 #include <lib/sm/sm_err.h>
41 #include <lib/sm/smcall.h>
42 #include <lib/smc/smc.h>
43 #include <limits.h>
44 #include <lk/init.h>
45 #include <stdatomic.h>
46 #include <string.h>
47 #include <trace.h>
48
49 #include "stdcalltest.h"
50
args_get_id(struct smc32_args * args)51 static ext_mem_obj_id_t args_get_id(struct smc32_args* args) {
52 return (((uint64_t)args->params[1] << 32) | args->params[0]);
53 }
54
/* Size of the shared-memory buffer, carried in the third SMC parameter. */
static size_t args_get_sz(struct smc32_args* args) {
    return (size_t)args->params[2];
}
58
59 /**
60 * stdcalltest_sharedmem_rw - Test shared memory buffer.
61 * @id: Shared memory id.
62 * @size: Size.
63 *
64 * Check that buffer contains the 64 bit integer sqequnce [0, 1, 2, ...,
65 * @size / 8 - 1] and modify sequence to [@size, @size - 1, size - 2, ...,
66 * @size - (@size / 8 - 1)].
67 *
68 * Return: 0 on success. SM_ERR_INVALID_PARAMETERS is buffer does not contain
69 * expected input pattern. SM_ERR_INTERNAL_FAILURE if @id could not be mapped.
70 */
static long stdcalltest_sharedmem_rw(ext_mem_client_id_t client_id,
                                     ext_mem_obj_id_t mem_obj_id,
                                     size_t size) {
    struct vmm_aspace* aspace = vmm_get_kernel_aspace();
    status_t ret;
    long status;
    void* va;
    uint64_t* va64;

    /* Mapping below is done in whole pages, so reject unaligned sizes. */
    if (!IS_PAGE_ALIGNED(size)) {
        return SM_ERR_INVALID_PARAMETERS;
    }

    /* Map the client's shared memory object into the kernel address space. */
    ret = ext_mem_map_obj_id(aspace, "stdcalltest", client_id, mem_obj_id, 0, 0,
                             size, &va, PAGE_SIZE_SHIFT, 0,
                             ARCH_MMU_FLAG_PERM_NO_EXECUTE);
    if (ret != NO_ERROR) {
        status = SM_ERR_INTERNAL_FAILURE;
        goto err_map;
    }
    va64 = va;

    /* Verify the expected pattern [0, 1, 2, ...] and overwrite each word
     * with (size - index) so the client can verify the write-back. */
    for (size_t i = 0; i < size / sizeof(*va64); i++) {
        if (va64[i] != i) {
            TRACEF("input mismatch at %zd, got 0x%" PRIx64
                   " instead of 0x%zx\n",
                   i, va64[i], i);
            status = SM_ERR_INVALID_PARAMETERS;
            goto err_input_mismatch;
        }
        va64[i] = size - i;
    }
    status = 0;

err_input_mismatch:
    /* Always unmap; an unmap failure overrides any earlier status. */
    ret = vmm_free_region(aspace, (vaddr_t)va);
    if (ret) {
        status = SM_ERR_INTERNAL_FAILURE;
    }
err_map:
    return status;
}
113
114 #if ARCH_ARM64
115 long clobber_sve_asm(uint32_t byte_clobber);
116 long load_sve_asm(uint8_t* arr, uint64_t len);
117
118 #define SVE_VEC_LEN_BITS 128
119 #define SVE_NB_BYTE_VEC_LEN SVE_VEC_LEN_BITS / 8
120 #define SVE_SVE_REGS_COUNT 32
121
122 #define SMC_FC_TRNG_VERSION SMC_FASTCALL_NR(SMC_ENTITY_STD, 0x50)
123
124 static uint8_t sve_regs[SMP_MAX_CPUS][SVE_SVE_REGS_COUNT * SVE_NB_BYTE_VEC_LEN]
125 __attribute__((aligned(16)));
126
127 enum clobber_restore_error {
128 SVE_NO_ERROR = 0,
129 SVE_GENERIC_ERROR = 1,
130 SVE_REGISTER_NOT_RESTORED = 2,
131 SVE_ERROR_LONG_TYPE = LONG_MAX
132 };
133
/* Clobber (on call 1) and then verify the SVE registers for the current cpu.
 * Returns SVE_NO_ERROR (0) on success or when SVE is not supported. */
long stdcalltest_clobber_sve(struct smc32_args* args) {
    enum clobber_restore_error ret = SVE_NO_ERROR;
    if (!arch_sve_supported()) {
        /* test is OK, if there is no SVE there is nothing to assert but this is
         * not an ERROR */
        return ret;
    }

    /* Enable SVE access; the previous cpacr_el1 is restored on every exit. */
    uint64_t v_cpacr_el1 = arch_enable_sve();
    uint cpuid = arch_curr_cpu_num();
    long call_nb = args->params[1];

    /* First Call on cpu needs to Clobber ASM registers */
    if (call_nb == 1) {
        ret = clobber_sve_asm(args->params[0]);
        if (ret != SVE_NO_ERROR) {
            /* NOTE(review): panic normally does not return; the assignment
             * and goto below are a defensive fallback. */
            panic("Failed to Clobber ARM SVE registers: %lx\n", ret);
            ret = SVE_GENERIC_ERROR;
            goto end_stdcalltest_clobber_sve;
        }
    }

    /* Make sure registers are as expected */
    const uint8_t EXPECTED = (uint8_t)args->params[0];
    ret = load_sve_asm(sve_regs[cpuid], SVE_NB_BYTE_VEC_LEN);
    if (ret != SVE_NO_ERROR) {
        panic("Failed to Load ARM SVE registers: %lx\n", ret);
        ret = SVE_GENERIC_ERROR;
        goto end_stdcalltest_clobber_sve;
    }

    /* Every byte of every vector register must still hold the clobber value. */
    for (size_t idx = 0; idx < countof(sve_regs[cpuid]); ++idx) {
        uint8_t val = sve_regs[cpuid][idx];

        if (val != EXPECTED) {
            ret = SVE_REGISTER_NOT_RESTORED;
            goto end_stdcalltest_clobber_sve;
        }
    }

end_stdcalltest_clobber_sve:
    ARM64_WRITE_SYSREG(cpacr_el1, v_cpacr_el1);
    return ret;
}
178
stdcalltest_compute_fpacr(uint64_t * old_cpacr,uint64_t * new_cpacr)179 static long stdcalltest_compute_fpacr(uint64_t* old_cpacr,
180 uint64_t* new_cpacr) {
181 uint64_t cpacr = ARM64_READ_SYSREG(cpacr_el1);
182
183 DEBUG_ASSERT(old_cpacr);
184 DEBUG_ASSERT(new_cpacr);
185
186 if ((cpacr >> 20) & 1) {
187 return SM_ERR_NOT_ALLOWED;
188 }
189
190 *old_cpacr = cpacr;
191 *new_cpacr = cpacr | (3 << 20);
192 return 0;
193 }
194
/*
 * Deterministic pseudo-random 32-bit generator for the FP/SIMD tests.
 * Returns the previous state; the first call therefore returns the seed.
 * Thread-safe via a CAS loop on the shared state.
 */
static uint32_t stdcalltest_random_u32(void) {
    /* RNG state, seeded with the 32-bit golden ratio constant. */
    static _Atomic uint32_t hash = 0x9e3779b1U;
    uint32_t oldh, newh;

    /*
     * Advance the state with the MurmurHash3 32-bit finalizer (fmix32).
     * Unsigned arithmetic keeps the shifts and wrapping multiplies
     * well-defined; the previous int-based version relied on
     * implementation-defined signed shifts and conversions.
     */
    do {
        oldh = atomic_load(&hash);
        newh = oldh;
        newh ^= newh >> 16;
        newh *= 0x85ebca6bU;
        newh ^= newh >> 13;
        newh *= 0xc2b2ae35U;
        newh ^= newh >> 16;
    } while (!atomic_compare_exchange_weak(&hash, &oldh, newh));

    return oldh;
}
212
/* Reference FP/SIMD state: filled with random values by the clobber
 * fastcall and compared against the live registers by the check fastcall. */
static struct fpstate stdcalltest_random_fpstate;
214
/* Fill the FP/SIMD registers with a fresh random reference state.
 * Returns 0 on success, SM_ERR_NOT_ALLOWED if EL1 FP access is already on,
 * or SM_ERR_INTERNAL_FAILURE if the state could not be loaded. */
static long stdcalltest_clobber_fpsimd_clobber(struct smc32_args* args) {
    long ret;
    uint64_t old_cpacr, new_cpacr;
    bool loaded;

    /*
     * Check if the FPU at EL1 is already on;
     * it shouldn't be, so return an error if it is.
     * Otherwise, save the old value and restore it
     * after we're done.
     */
    ret = stdcalltest_compute_fpacr(&old_cpacr, &new_cpacr);
    if (ret) {
        return ret;
    }

    /* Fill the shared reference state with pseudo-random 64-bit values. */
    for (size_t i = 0; i < countof(stdcalltest_random_fpstate.regs); i++) {
        stdcalltest_random_fpstate.regs[i] =
                ((uint64_t)stdcalltest_random_u32() << 32) |
                stdcalltest_random_u32();
    }
    /*
     * TODO: set FPCR&FPSR to random values, but they need to be masked
     * because many of their bits are MBZ
     */
    stdcalltest_random_fpstate.fpcr = 0;
    stdcalltest_random_fpstate.fpsr = 0;

    /* Briefly enable FP access, force-load the reference state into the
     * registers, then restore the previous cpacr_el1. */
    ARM64_WRITE_SYSREG(cpacr_el1, new_cpacr);
    loaded = arm64_fpu_load_fpstate(&stdcalltest_random_fpstate, true);
    ARM64_WRITE_SYSREG(cpacr_el1, old_cpacr);
    return loaded ? 0 : SM_ERR_INTERNAL_FAILURE;
}
248
/* Verify that the FP/SIMD registers still hold the reference state set by
 * the clobber fastcall. Returns 0 on success, SM_ERR_BUSY if another thread's
 * state was live, or SM_ERR_INTERNAL_FAILURE on a register mismatch. */
static long stdcalltest_clobber_fpsimd_check(struct smc32_args* args) {
    long ret;
    uint64_t old_cpacr, new_cpacr;
    struct fpstate new_fpstate;
    bool loaded;

    ret = stdcalltest_compute_fpacr(&old_cpacr, &new_cpacr);
    if (ret) {
        return ret;
    }

    /* Enable FP access, load the reference state non-forcibly (the `loaded`
     * flag then reveals whether our state was still current), save the live
     * registers for comparison, and restore cpacr_el1. */
    ARM64_WRITE_SYSREG(cpacr_el1, new_cpacr);
    loaded = arm64_fpu_load_fpstate(&stdcalltest_random_fpstate, false);
    arm64_fpu_save_fpstate(&new_fpstate);
    ARM64_WRITE_SYSREG(cpacr_el1, old_cpacr);

    if (loaded) {
        /*
         * Check whether the current fpstate is still the one set
         * earlier by the clobber. If not, it means another thread
         * ran and overwrote our registers, and we do not want to
         * leak them here.
         */
        ret = SM_ERR_BUSY;
        goto err;
    }

    /* Compare every saved register and the control/status words against the
     * reference state. */
    for (size_t i = 0; i < countof(new_fpstate.regs); i++) {
        if (new_fpstate.regs[i] != stdcalltest_random_fpstate.regs[i]) {
            TRACEF("regs[%zu] mismatch: %" PRIx64 " != %" PRIx64 "\n", i,
                   new_fpstate.regs[i], stdcalltest_random_fpstate.regs[i]);
            ret = SM_ERR_INTERNAL_FAILURE;
            goto err;
        }
    }
    if (new_fpstate.fpcr != stdcalltest_random_fpstate.fpcr) {
        TRACEF("FPCR mismatch: %" PRIx32 " != %" PRIx32 "\n", new_fpstate.fpcr,
               stdcalltest_random_fpstate.fpcr);
        ret = SM_ERR_INTERNAL_FAILURE;
        goto err;
    }
    if (new_fpstate.fpsr != stdcalltest_random_fpstate.fpsr) {
        TRACEF("FPSR mismatch: %" PRIx32 " != %" PRIx32 "\n", new_fpstate.fpsr,
               stdcalltest_random_fpstate.fpsr);
        ret = SM_ERR_INTERNAL_FAILURE;
        goto err;
    }

    /* Return 0 on success */
    ret = 0;

err:
    return ret;
}
303 #endif
304
/* 1ms x5000=5s should be long enough for the test to finish */
#define FPSIMD_TIMER_PERIOD_NS (1000000)
#define FPSIMD_TIMER_TICKS (5000)

/* Per-cpu periodic timers and their remaining-tick counters; each counter is
 * decremented by fpsimd_timer_cb, which cancels its timer on reaching zero. */
static struct timer fpsimd_timers[SMP_MAX_CPUS];
static uint fpsimd_timer_ticks[SMP_MAX_CPUS];
311
/* Periodic timer callback: count down this cpu's remaining ticks and stop
 * the timer once they are exhausted. Never requests a reschedule. */
static enum handler_return fpsimd_timer_cb(struct timer* timer,
                                           lk_time_ns_t now,
                                           void* arg) {
    uint curr_cpu = arch_curr_cpu_num();

    if (--fpsimd_timer_ticks[curr_cpu] == 0) {
        LTRACEF("Disabling FP test timer on cpu %u\n", curr_cpu);
        timer_cancel(&fpsimd_timers[curr_cpu]);
    }

    return INT_NO_RESCHEDULE;
}
325
stdcalltest_clobber_fpsimd_timer(struct smc32_args * args)326 static long stdcalltest_clobber_fpsimd_timer(struct smc32_args* args) {
327 uint cpu = arch_curr_cpu_num();
328 bool start_timer = !fpsimd_timer_ticks[cpu];
329
330 DEBUG_ASSERT(arch_ints_disabled());
331
332 LTRACEF("Enabling FP test timer on cpu %u\n", cpu);
333 fpsimd_timer_ticks[cpu] = FPSIMD_TIMER_TICKS;
334 if (start_timer) {
335 timer_set_periodic_ns(&fpsimd_timers[cpu], FPSIMD_TIMER_PERIOD_NS,
336 fpsimd_timer_cb, NULL);
337 }
338
339 return 1;
340 }
341
stdcalltest_stdcall(struct smc32_args * args)342 static long stdcalltest_stdcall(struct smc32_args* args) {
343 switch (args->smc_nr) {
344 case SMC_SC_TEST_VERSION:
345 return TRUSTY_STDCALLTEST_API_VERSION;
346 case SMC_SC_TEST_SHARED_MEM_RW:
347 return stdcalltest_sharedmem_rw(args->client_id, args_get_id(args),
348 args_get_sz(args));
349 #if ARCH_ARM64
350 case SMC_SC_TEST_CLOBBER_SVE: {
351 return stdcalltest_clobber_sve(args);
352 }
353 #endif
354 default:
355 return SM_ERR_UNDEFINED_SMC;
356 }
357 }
358
stdcalltest_fastcall(struct smc32_args * args)359 static long stdcalltest_fastcall(struct smc32_args* args) {
360 switch (args->smc_nr) {
361 #if ARCH_ARM64
362 case SMC_FC_TEST_CLOBBER_FPSIMD_CLOBBER:
363 return stdcalltest_clobber_fpsimd_clobber(args);
364 case SMC_FC_TEST_CLOBBER_FPSIMD_CHECK:
365 return stdcalltest_clobber_fpsimd_check(args);
366 #else
367 /* This test is a no-op on other architectures, e.g., arm32 */
368 case SMC_FC_TEST_CLOBBER_FPSIMD_CLOBBER:
369 case SMC_FC_TEST_CLOBBER_FPSIMD_CHECK:
370 return 0;
371 #endif
372 default:
373 return SM_ERR_UNDEFINED_SMC;
374 }
375 }
376
stdcalltest_nopcall(struct smc32_args * args)377 static long stdcalltest_nopcall(struct smc32_args* args) {
378 switch (args->params[0]) {
379 case SMC_NC_TEST_CLOBBER_FPSIMD_TIMER:
380 return stdcalltest_clobber_fpsimd_timer(args);
381 default:
382 return SM_ERR_UNDEFINED_SMC;
383 }
384 }
385
/* Handler table registered with lib/sm for SMC_ENTITY_TEST. */
static struct smc32_entity stdcalltest_sm_entity = {
        .stdcall_handler = stdcalltest_stdcall,
        .fastcall_handler = stdcalltest_fastcall,
        .nopcall_handler = stdcalltest_nopcall,
};
391
/* Init hook: prepare the per-cpu FP test timers, then register the test
 * entity's SMC handlers with lib/sm. Registration failure is only logged. */
static void stdcalltest_init(uint level) {
    for (size_t cpu = 0; cpu < SMP_MAX_CPUS; cpu++) {
        timer_initialize(&fpsimd_timers[cpu]);
    }

    int rc = sm_register_entity(SMC_ENTITY_TEST, &stdcalltest_sm_entity);
    if (rc) {
        printf("trusty error register entity: %d\n", rc);
    }
}
LK_INIT_HOOK(stdcalltest, stdcalltest_init, LK_INIT_LEVEL_APPS);
405
406 #endif
407