1 /*
2 * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <errno.h>
9 #include <lib/el3_runtime/context_mgmt.h>
10 #include "spmd_private.h"
11
12 /*******************************************************************************
13 * spmd_build_spmc_message
14 *
15 * Builds an SPMD to SPMC direct message request.
16 ******************************************************************************/
static void spmd_build_spmc_message(gp_regs_t *gpregs, unsigned long long message)
{
	/*
	 * Populate the saved GP register context so that, on next entry into
	 * the SPMC, it observes an FF-A direct message request originating
	 * from the SPMD endpoint.
	 */
	uint64_t src_dst = (SPMD_DIRECT_MSG_ENDPOINT_ID <<
			    FFA_DIRECT_MSG_SOURCE_SHIFT) |
			   spmd_spmc_id_get();

	/* w0: function id, w1: source/destination endpoint ids */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1, src_dst);

	/* w2 must be zero, x3 carries the message payload */
	write_ctx_reg(gpregs, CTX_GPREG_X2, FFA_PARAM_MBZ);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);
}
26
27 /*******************************************************************************
28 * spmd_pm_secondary_core_set_ep
29 ******************************************************************************/
spmd_pm_secondary_core_set_ep(unsigned long long mpidr,uintptr_t entry_point,unsigned long long context)30 int spmd_pm_secondary_core_set_ep(unsigned long long mpidr,
31 uintptr_t entry_point, unsigned long long context)
32 {
33 int id = plat_core_pos_by_mpidr(mpidr);
34
35 if ((id < 0) || ((unsigned int)id >= PLATFORM_CORE_COUNT)) {
36 ERROR("%s inconsistent MPIDR (%llx)\n", __func__, mpidr);
37 return -EINVAL;
38 }
39
40 /*
41 * Check entry_point address is a PA within
42 * load_address <= entry_point < load_address + binary_size
43 */
44 if (!spmd_check_address_in_binary_image(entry_point)) {
45 ERROR("%s entry point is not within image boundaries (%llx)\n",
46 __func__, mpidr);
47 return -EINVAL;
48 }
49
50 spmd_spm_core_context_t *ctx = spmd_get_context_by_mpidr(mpidr);
51 spmd_pm_secondary_ep_t *secondary_ep = &ctx->secondary_ep;
52 if (secondary_ep->locked) {
53 ERROR("%s entry locked (%llx)\n", __func__, mpidr);
54 return -EINVAL;
55 }
56
57 /* Fill new entry to corresponding secondary core id and lock it */
58 secondary_ep->entry_point = entry_point;
59 secondary_ep->context = context;
60 secondary_ep->locked = true;
61
62 VERBOSE("%s %d %llx %lx %llx\n",
63 __func__, id, mpidr, entry_point, context);
64
65 return 0;
66 }
67
68 /*******************************************************************************
69 * This CPU has been turned on. Enter SPMC to initialise S-EL1 or S-EL2. As part
70 * of the SPMC initialization path, they will initialize any SPs that they
71 * manage. Entry into SPMC is done after initialising minimal architectural
72 * state that guarantees safe execution.
73 ******************************************************************************/
static void spmd_cpu_on_finish_handler(u_register_t unused)
{
	/*
	 * PSCI CPU_ON finish hook: re-enter the SPMC on this freshly powered
	 * core, if a secondary entry point was registered for it. On success
	 * (or when no entry point is set) the context moves to SPMC_STATE_ON;
	 * a failed synchronous entry leaves it in SPMC_STATE_OFF.
	 */
	entry_point_info_t *spmc_ep_info = spmd_spmc_ep_info_get();
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	uint64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_ON);
	assert(spmc_ep_info != NULL);

	/*
	 * TODO: this might require locking the spmc_ep_info structure,
	 * or provisioning one structure per cpu
	 */
	if (ctx->secondary_ep.entry_point != 0UL) {
		/* Redirect the SPMC entry to the registered warm-boot PC and
		 * hand the registered context value through x0. */
		spmc_ep_info->pc = ctx->secondary_ep.entry_point;
		cm_setup_context(&ctx->cpu_ctx, spmc_ep_info);
		write_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx), CTX_GPREG_X0,
			      ctx->secondary_ep.context);

		/* Mark CPU as initiating ON operation */
		ctx->state = SPMC_STATE_ON_PENDING;

		rc = spmd_spm_core_sync_entry(ctx);
		if (rc != 0ULL) {
			ERROR("%s failed (%llu) on CPU%u\n", __func__, rc,
			      linear_id);
			ctx->state = SPMC_STATE_OFF;
			return;
		}
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("CPU %u on!\n", linear_id);
}
114
115 /*******************************************************************************
116 * spmd_cpu_off_handler
117 ******************************************************************************/
static int32_t spmd_cpu_off_handler(u_register_t unused)
{
	/*
	 * PSCI CPU_OFF hook: if a secondary entry point was registered for
	 * this core, notify the SPMC of the power-down via an SPMD-to-SPMC
	 * direct message request before marking the context off.
	 *
	 * Always returns 0; a failed synchronous entry is logged but does not
	 * veto the power-down.
	 */
	spmd_spm_core_context_t *ctx = spmd_get_context();
	unsigned int linear_id = plat_my_core_pos();
	/*
	 * Unsigned to match spmd_spm_core_sync_entry()'s return, the 0ULL
	 * comparison and the %llu format below (the sibling
	 * spmd_cpu_on_finish_handler already uses uint64_t).
	 */
	uint64_t rc;

	assert(ctx != NULL);
	assert(ctx->state != SPMC_STATE_OFF);

	/* Nothing to tell the SPMC if no secondary entry point is set. */
	if (ctx->secondary_ep.entry_point == 0UL) {
		goto exit;
	}

	/* Build an SPMD to SPMC direct message request. */
	spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx), PSCI_CPU_OFF);

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("%s failed (%llu) on CPU%u\n", __func__, rc, linear_id);
	}

	/* TODO expect FFA_DIRECT_MSG_RESP returned from SPMC */

exit:
	ctx->state = SPMC_STATE_OFF;

	VERBOSE("CPU %u off!\n", linear_id);

	return 0;
}
148
149 /*******************************************************************************
150 * Structure populated by the SPM Dispatcher to perform any bookkeeping before
151 * PSCI executes a power mgmt. operation.
152 ******************************************************************************/
const spd_pm_ops_t spmd_pm = {
	/* Invoked after PSCI finishes turning a CPU on. */
	.svc_on_finish = spmd_cpu_on_finish_handler,
	/* Invoked before PSCI turns a CPU off. */
	.svc_off = spmd_cpu_off_handler
};
157