• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 
8 /*******************************************************************************
9  * This is the Secure Payload Dispatcher (SPD). The dispatcher is meant to be a
10  * plug-in component to the Secure Monitor, registered as a runtime service. The
11  * SPD is expected to be a functional extension of the Secure Payload (SP) that
12  * executes in Secure EL1. The Secure Monitor will delegate all SMCs targeting
13  * the Trusted OS/Applications range to the dispatcher. The SPD will either
14  * handle the request locally or delegate it to the Secure Payload. It is also
15  * responsible for initialising and maintaining communication with the SP.
16  ******************************************************************************/
17 #include <arch_helpers.h>
18 #include <assert.h>
19 #include <bl31.h>
20 #include <bl_common.h>
21 #include <context_mgmt.h>
22 #include <debug.h>
23 #include <errno.h>
24 #include <platform.h>
25 #include <runtime_svc.h>
26 #include <stddef.h>
27 #include <string.h>
28 #include <tsp.h>
29 #include <uuid.h>
30 #include "tspd_private.h"
31 
/*******************************************************************************
 * Address of the entrypoint vector table in the Secure Payload. It is
 * initialised once on the primary core after a cold boot: tspd_smc_handler()
 * stashes the value passed by the TSP in x1 of the TSP_ENTRY_DONE SMC.
 ******************************************************************************/
tsp_vectors_t *tsp_vectors;

/*******************************************************************************
 * Array to keep track of per-cpu Secure Payload state. Each entry is indexed
 * by the linear core position returned by plat_my_core_pos().
 ******************************************************************************/
tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];


/* TSP UID returned to normal-world callers of TOS_UID */
DEFINE_SVC_UUID(tsp_uuid,
		0x5b3056a0, 0x3291, 0x427b, 0x98, 0x11,
		0x71, 0x68, 0xca, 0x50, 0xf3, 0xfa);

/* Forward declaration: deferred TSP initialisation, registered with BL31 */
int32_t tspd_init(void);
50 
51 /*
52  * This helper function handles Secure EL1 preemption. The preemption could be
53  * due Non Secure interrupts or EL3 interrupts. In both the cases we context
54  * switch to the normal world and in case of EL3 interrupts, it will again be
55  * routed to EL3 which will get handled at the exception vectors.
56  */
tspd_handle_sp_preemption(void * handle)57 uint64_t tspd_handle_sp_preemption(void *handle)
58 {
59 	cpu_context_t *ns_cpu_context;
60 
61 	assert(handle == cm_get_context(SECURE));
62 	cm_el1_sysregs_context_save(SECURE);
63 	/* Get a reference to the non-secure context */
64 	ns_cpu_context = cm_get_context(NON_SECURE);
65 	assert(ns_cpu_context);
66 
67 	/*
68 	 * To allow Secure EL1 interrupt handler to re-enter TSP while TSP
69 	 * is preempted, the secure system register context which will get
70 	 * overwritten must be additionally saved. This is currently done
71 	 * by the TSPD S-EL1 interrupt handler.
72 	 */
73 
74 	/*
75 	 * Restore non-secure state.
76 	 */
77 	cm_el1_sysregs_context_restore(NON_SECURE);
78 	cm_set_next_eret_context(NON_SECURE);
79 
80 	/*
81 	 * The TSP was preempted during execution of a Yielding SMC Call.
82 	 * Return back to the normal world with SMC_PREEMPTED as error
83 	 * code in x0.
84 	 */
85 	SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
86 }
87 
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
					    uint32_t flags,
					    void *handle,
					    void *cookie)
{
	uint32_t linear_id;
	tsp_context_t *tsp_ctx;

	/* Check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering the TSP */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* Get a reference to this cpu's TSP context */
	linear_id = plat_my_core_pos();
	tsp_ctx = &tspd_sp_context[linear_id];
	assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

	/*
	 * Determine if the TSP was previously preempted. Its last known
	 * context has to be preserved in this case.
	 * The TSP should return control to the TSPD after handling this
	 * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
	 * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
	 * structure. There is no need to save the secure system register
	 * context since the TSP is supposed to preserve it during S-EL1
	 * interrupt handling.
	 */
	if (get_yield_smc_active_flag(tsp_ctx->state)) {
		/*
		 * Stash SPSR_EL3/ELR_EL3 of the preempted Yielding SMC so
		 * they can be reinstated when the TSP reports completion via
		 * TSP_HANDLED_S_EL1_INTR.
		 */
		tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						      CTX_SPSR_EL3);
		tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
						     CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
		/* Need to save the previously interrupted secure context */
		memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
	}

	/*
	 * Enter the TSP at its S-EL1 interrupt entry point with all
	 * exceptions masked.
	 */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

	cm_set_next_eret_context(SECURE);

	/*
	 * Tell the TSP that it has to handle a S-EL1 interrupt synchronously.
	 * Also the instruction in normal world where the interrupt was
	 * generated is passed for debugging purposes. It is safe to retrieve
	 * this address from ELR_EL3 as the secure context will not take effect
	 * until el3_exit().
	 */
	SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN, read_elr_el3());
}
151 
152 #if TSP_NS_INTR_ASYNC_PREEMPT
153 /*******************************************************************************
154  * This function is the handler registered for Non secure interrupts by the
155  * TSPD. It validates the interrupt and upon success arranges entry into the
156  * normal world for handling the interrupt.
157  ******************************************************************************/
tspd_ns_interrupt_handler(uint32_t id,uint32_t flags,void * handle,void * cookie)158 static uint64_t tspd_ns_interrupt_handler(uint32_t id,
159 					    uint32_t flags,
160 					    void *handle,
161 					    void *cookie)
162 {
163 	/* Check the security state when the exception was generated */
164 	assert(get_interrupt_src_ss(flags) == SECURE);
165 
166 	/*
167 	 * Disable the routing of NS interrupts from secure world to EL3 while
168 	 * interrupted on this core.
169 	 */
170 	disable_intr_rm_local(INTR_TYPE_NS, SECURE);
171 
172 	return tspd_handle_sp_preemption(handle);
173 }
174 #endif
175 
176 /*******************************************************************************
177  * Secure Payload Dispatcher setup. The SPD finds out the SP entrypoint and type
178  * (aarch32/aarch64) if not already known and initialises the context for entry
179  * into the SP for its initialisation.
180  ******************************************************************************/
tspd_setup(void)181 int32_t tspd_setup(void)
182 {
183 	entry_point_info_t *tsp_ep_info;
184 	uint32_t linear_id;
185 
186 	linear_id = plat_my_core_pos();
187 
188 	/*
189 	 * Get information about the Secure Payload (BL32) image. Its
190 	 * absence is a critical failure.  TODO: Add support to
191 	 * conditionally include the SPD service
192 	 */
193 	tsp_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
194 	if (!tsp_ep_info) {
195 		WARN("No TSP provided by BL2 boot loader, Booting device"
196 			" without TSP initialization. SMC`s destined for TSP"
197 			" will return SMC_UNK\n");
198 		return 1;
199 	}
200 
201 	/*
202 	 * If there's no valid entry point for SP, we return a non-zero value
203 	 * signalling failure initializing the service. We bail out without
204 	 * registering any handlers
205 	 */
206 	if (!tsp_ep_info->pc)
207 		return 1;
208 
209 	/*
210 	 * We could inspect the SP image and determine its execution
211 	 * state i.e whether AArch32 or AArch64. Assuming it's AArch64
212 	 * for the time being.
213 	 */
214 	tspd_init_tsp_ep_state(tsp_ep_info,
215 				TSP_AARCH64,
216 				tsp_ep_info->pc,
217 				&tspd_sp_context[linear_id]);
218 
219 #if TSP_INIT_ASYNC
220 	bl31_set_next_image_type(SECURE);
221 #else
222 	/*
223 	 * All TSPD initialization done. Now register our init function with
224 	 * BL31 for deferred invocation
225 	 */
226 	bl31_register_bl32_init(&tspd_init);
227 #endif
228 	return 0;
229 }
230 
231 /*******************************************************************************
232  * This function passes control to the Secure Payload image (BL32) for the first
233  * time on the primary cpu after a cold boot. It assumes that a valid secure
234  * context has already been created by tspd_setup() which can be directly used.
235  * It also assumes that a valid non-secure context has been initialised by PSCI
236  * so it does not need to save and restore any non-secure state. This function
237  * performs a synchronous entry into the Secure payload. The SP passes control
238  * back to this routine through a SMC.
239  ******************************************************************************/
tspd_init(void)240 int32_t tspd_init(void)
241 {
242 	uint32_t linear_id = plat_my_core_pos();
243 	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
244 	entry_point_info_t *tsp_entry_point;
245 	uint64_t rc;
246 
247 	/*
248 	 * Get information about the Secure Payload (BL32) image. Its
249 	 * absence is a critical failure.
250 	 */
251 	tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
252 	assert(tsp_entry_point);
253 
254 	cm_init_my_context(tsp_entry_point);
255 
256 	/*
257 	 * Arrange for an entry into the test secure payload. It will be
258 	 * returned via TSP_ENTRY_DONE case
259 	 */
260 	rc = tspd_synchronous_sp_entry(tsp_ctx);
261 	assert(rc != 0);
262 
263 	return rc;
264 }
265 
266 
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
	uint64_t rc;
#if TSP_INIT_ASYNC
	entry_point_info_t *next_image_info;
#endif

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used by TSP to indicate that it was
	 * preempted by a normal world IRQ.
	 *
	 */
	case TSP_PREEMPTED:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		return tspd_handle_sp_preemption(handle);

	/*
	 * This function ID is used only by the TSP to indicate that it has
	 * finished handling a S-EL1 interrupt or was preempted by a higher
	 * priority pending EL3 interrupt. Execution should resume
	 * in the normal world.
	 */
	case TSP_HANDLED_S_EL1_INTR:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		assert(handle == cm_get_context(SECURE));

		/*
		 * Restore the relevant EL3 state which was saved to service
		 * this SMC.
		 */
		if (get_yield_smc_active_flag(tsp_ctx->state)) {
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_SPSR_EL3,
				    tsp_ctx->saved_spsr_el3);
			SMC_SET_EL3(&tsp_ctx->cpu_ctx,
				    CTX_ELR_EL3,
				    tsp_ctx->saved_elr_el3);
#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Need to restore the previously interrupted
			 * secure context.
			 */
			memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
				TSPD_SP_CTX_SIZE);
#endif
		}

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since the TSP was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		/* x0-x3 of the interrupted normal world remain untouched. */
		SMC_RET0((uint64_t) ns_cpu_context);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case TSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(tsp_vectors == NULL);
		tsp_vectors = (tsp_vectors_t *) x1;

		if (tsp_vectors) {
			set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

			/*
			 * TSP has been successfully initialized. Register power
			 * management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&tspd_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						tspd_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();

#if TSP_NS_INTR_ASYNC_PREEMPT
			/*
			 * Register an interrupt handler for NS interrupts when
			 * generated during code executing in secure state are
			 * routed to EL3.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, SECURE);

			rc = register_interrupt_type_handler(INTR_TYPE_NS,
						tspd_ns_interrupt_handler,
						flags);
			if (rc)
				panic();

			/*
			 * Disable the NS interrupt locally.
			 */
			disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
		}


#if TSP_INIT_ASYNC
		/* Save the Secure EL1 system register context */
		assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
		cm_el1_sysregs_context_save(SECURE);

		/* Program EL3 registers to enable entry into the next EL */
		next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
		assert(next_image_info);
		assert(NON_SECURE ==
				GET_SECURITY_STATE(next_image_info->h.attr));

		cm_init_my_context(next_image_info);
		cm_prepare_el3_exit(NON_SECURE);
		SMC_RET0(cm_get_context(NON_SECURE));
#else
		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context. Control does not fall through to the cases
		 * below: execution resumes in tspd_init().
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);
#endif
	/*
	 * This function ID is used only by the SP to indicate it has finished
	 * aborting a preempted Yielding SMC Call.
	 */
	case TSP_ABORT_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TSP_ON_DONE:
	case TSP_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TSP_OFF_DONE:
	case TSP_SUSPEND_DONE:
	case TSP_SYSTEM_OFF_DONE:
	case TSP_SYSTEM_RESET_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * SP reports completion. The SPD must have initiated the
		 * original request through a synchronous entry into the SP.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller. Control resumes at the matching
		 * tspd_synchronous_sp_entry() call site rather than falling
		 * through to the cases below.
		 */
		tspd_synchronous_sp_exit(tsp_ctx, x1);

		/*
		 * Request from non-secure client to perform an
		 * arithmetic operation or response from secure
		 * payload to an earlier request.
		 */
	case TSP_FAST_FID(TSP_ADD):
	case TSP_FAST_FID(TSP_SUB):
	case TSP_FAST_FID(TSP_MUL):
	case TSP_FAST_FID(TSP_DIV):

	case TSP_YIELD_FID(TSP_ADD):
	case TSP_YIELD_FID(TSP_SUB):
	case TSP_YIELD_FID(TSP_MUL):
	case TSP_YIELD_FID(TSP_DIV):
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * The parameters are in x1 and x2. Figure out which
			 * registers need to be preserved, save the non-secure
			 * state and send the request to the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));

			/* Check if we are already preempted */
			if (get_yield_smc_active_flag(tsp_ctx->state))
				SMC_RET1(handle, SMC_UNK);

			cm_el1_sysregs_context_save(NON_SECURE);

			/* Save x1 and x2 for use by TSP_GET_ARGS call below */
			store_tsp_args(tsp_ctx, x1, x2);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/* Set appropriate entry for SMC.
			 * We expect the TSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->fast_smc_entry);
			} else {
				set_yield_smc_active_flag(tsp_ctx->state);
				cm_set_elr_el3(SECURE, (uint64_t)
						&tsp_vectors->yield_smc_entry);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Enable the routing of NS interrupts to EL3
				 * during processing of a Yielding SMC Call on
				 * this core.
				 */
				enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);
			SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x3. Copy it
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);
			if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_YIELD) {
				clr_yield_smc_active_flag(tsp_ctx->state);
#if TSP_NS_INTR_ASYNC_PREEMPT
				/*
				 * Disable the routing of NS interrupts to EL3
				 * after processing of a Yielding SMC Call on
				 * this core is finished.
				 */
				disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
			}

			SMC_RET3(ns_cpu_context, x1, x2, x3);
		}

		break;
	/*
	 * Request from the non-secure world to abort a preempted Yielding SMC
	 * Call.
	 */
	case TSP_FID_ABORT:
		/* ABORT should only be invoked by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		assert(handle == cm_get_context(NON_SECURE));
		cm_el1_sysregs_context_save(NON_SECURE);

		/* Abort the preempted SMC request */
		if (!tspd_abort_preempted_smc(tsp_ctx)) {
			/*
			 * If there was no preempted SMC to abort, return
			 * SMC_UNK.
			 *
			 * Restoring the NON_SECURE context is not necessary as
			 * the synchronous entry did not take place if the
			 * return code of tspd_abort_preempted_smc is zero.
			 */
			cm_set_next_eret_context(NON_SECURE);
			break;
		}

		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);
		SMC_RET1(handle, SMC_OK);

		/*
		 * Request from non secure world to resume the preempted
		 * Yielding SMC Call.
		 */
	case TSP_FID_RESUME:
		/* RESUME should be invoked only by normal world */
		if (!ns) {
			assert(0);
			break;
		}

		/*
		 * This is a resume request from the non-secure client.
		 * save the non-secure state and send the request to
		 * the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		/* Check if we are already preempted before resume */
		if (!get_yield_smc_active_flag(tsp_ctx->state))
			SMC_RET1(handle, SMC_UNK);

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
#if TSP_NS_INTR_ASYNC_PREEMPT
		/*
		 * Enable the routing of NS interrupts to EL3 during resumption
		 * of a Yielding SMC Call on this core.
		 */
		enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif



		/* We just need to return to the preempted point in
		 * TSP and the execution will resume as normal.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);
		SMC_RET0(&tsp_ctx->cpu_ctx);

		/*
		 * This is a request from the secure payload for more arguments
		 * for an ongoing arithmetic operation requested by the
		 * non-secure world. Simply return the arguments from the non-
		 * secure client in the original call.
		 */
	case TSP_GET_ARGS:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/* Hand back the x1/x2 stashed by store_tsp_args() above. */
		get_tsp_args(tsp_ctx, x1, x2);
		SMC_RET2(handle, x1, x2);

	case TOS_CALL_COUNT:
		/*
		 * Return the number of service function IDs implemented to
		 * provide service to non-secure
		 */
		SMC_RET1(handle, TSP_NUM_FID);

	case TOS_UID:
		/* Return TSP UID to the caller */
		SMC_UUID_RET(handle, tsp_uuid);

	case TOS_CALL_VERSION:
		/* Return the version of current implementation */
		SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

	default:
		break;
	}

	/* Unknown or disallowed function ID: report SMC_UNK to the caller. */
	SMC_RET1(handle, SMC_UNK);
}
692 
/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	tspd_fast,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_FAST,
	tspd_setup,
	tspd_smc_handler
);

/*
 * Define a SPD runtime service descriptor for Yielding SMC Calls. Both
 * descriptors share tspd_smc_handler(); only the fast descriptor carries the
 * init hook, so tspd_setup() runs exactly once.
 */
DECLARE_RT_SVC(
	tspd_std,

	OEN_TOS_START,
	OEN_TOS_END,
	SMC_TYPE_YIELD,
	NULL,
	tspd_smc_handler
);
714