// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Linaro Ltd.
 */

#include <linux/bitfield.h>

#include <nvhe/pkvm.h>
#include <nvhe/mm.h>
#include <nvhe/mem_protect.h>
#include <nvhe/trap_handler.h>

/* SCMI protocol */
#define SCMI_PROTOCOL_POWER_DOMAIN	0x11

/* shmem registers */
#define SCMI_SHM_CHANNEL_STATUS		0x4
#define SCMI_SHM_CHANNEL_FLAGS		0x10
#define SCMI_SHM_LENGTH			0x14
#define SCMI_SHM_MESSAGE_HEADER		0x18
#define SCMI_SHM_MESSAGE_PAYLOAD	0x1c

/* channel status */
#define SCMI_CHN_FREE			(1U << 0)
#define SCMI_CHN_ERROR			(1U << 1)

/* channel flags */
#define SCMI_CHN_IRQ			(1U << 0)

/* message header */
#define SCMI_HDR_TOKEN			GENMASK(27, 18)
#define SCMI_HDR_PROTOCOL_ID		GENMASK(17, 10)
#define SCMI_HDR_MESSAGE_TYPE		GENMASK(9, 8)
#define SCMI_HDR_MESSAGE_ID		GENMASK(7, 0)

/* power domain */
#define SCMI_PD_STATE_SET		0x4
#define SCMI_PD_STATE_SET_FLAGS		0x0
#define SCMI_PD_STATE_SET_DOMAIN_ID	0x4
#define SCMI_PD_STATE_SET_POWER_STATE	0x8

#define SCMI_PD_STATE_SET_STATUS	0x0

#define SCMI_PD_STATE_SET_FLAGS_ASYNC	(1U << 0)

#define SCMI_PD_POWER_ON		0
#define SCMI_PD_POWER_OFF		(1U << 30)

#define SCMI_SUCCESS			0

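/*
 * The single SMC-based SCMI transport channel proxied by the hypervisor: the
 * SMC function ID used as the doorbell, and the shared memory page (host
 * physical frame plus its private EL2 mapping) that carries the messages.
 */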
static struct {
	u32				smc_id;
	phys_addr_t			shmem_pfn;
	size_t				shmem_size;
	void __iomem			*shmem;
} scmi_channel;

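/*
 * Power domains registered through pkvm_init_scmi_pd(), along with the ops
 * invoked when the host powers them on or off.
 */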
struct scmi_power_domain {
	struct kvm_power_domain			*pd;
	const struct kvm_power_domain_ops	*ops;
};

static struct scmi_power_domain scmi_power_domains[MAX_POWER_DOMAINS];
static int scmi_power_domain_count;

#define SCMI_POLL_TIMEOUT_US	1000000 /* 1s! */

/*
 * Forward the command to EL3, wait for the channel to be free again, then
 * check the command status in the payload.
 */
static int scmi_run_command(struct kvm_cpu_context *host_ctxt)
{
	u32 reg;
	unsigned long i = 0;

	__kvm_hyp_host_forward_smc(host_ctxt);

	do {
		reg = readl_relaxed(scmi_channel.shmem + SCMI_SHM_CHANNEL_STATUS);
		if (reg & SCMI_CHN_FREE)
			break;

		if (WARN_ON(++i > SCMI_POLL_TIMEOUT_US))
			return -ETIMEDOUT;

		pkvm_udelay(1);
	} while (!(reg & (SCMI_CHN_FREE | SCMI_CHN_ERROR)));

	if (reg & SCMI_CHN_ERROR)
		return -EIO;

	reg = readl_relaxed(scmi_channel.shmem + SCMI_SHM_MESSAGE_PAYLOAD +
			    SCMI_PD_STATE_SET_STATUS);
	if (reg != SCMI_SUCCESS)
		return -EIO;

	return 0;
}

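/*
 * Inspect the message pending in the shared memory channel. Only synchronous
 * power domain STATE_SET requests targeting a registered domain are handled
 * here; anything else is forwarded to EL3 untouched.
 */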
static void __kvm_host_scmi_handler(struct kvm_cpu_context *host_ctxt)
{
	int i;
	u32 reg;
	struct scmi_power_domain *scmi_pd = NULL;

	/*
	 * FIXME: the spec does not really allow for an intermediary filtering
	 * messages on the channel: as soon as the host clears SCMI_CHN_FREE,
	 * the server may process the message. It doesn't have to wait for a
	 * doorbell and could just poll on the shared mem. Unlikely in practice,
	 * but this code is not correct without a spec change requiring the
	 * server to observe an SMC before processing the message.
	 */
	reg = readl_relaxed(scmi_channel.shmem + SCMI_SHM_CHANNEL_STATUS);
	if (reg & (SCMI_CHN_FREE | SCMI_CHN_ERROR))
		return;

	reg = readl_relaxed(scmi_channel.shmem + SCMI_SHM_MESSAGE_HEADER);
	if (FIELD_GET(SCMI_HDR_PROTOCOL_ID, reg) != SCMI_PROTOCOL_POWER_DOMAIN)
		goto out_forward_smc;

	if (FIELD_GET(SCMI_HDR_MESSAGE_ID, reg) != SCMI_PD_STATE_SET)
		goto out_forward_smc;

	reg = readl_relaxed(scmi_channel.shmem + SCMI_SHM_MESSAGE_PAYLOAD +
			    SCMI_PD_STATE_SET_FLAGS);
	if (WARN_ON(reg & SCMI_PD_STATE_SET_FLAGS_ASYNC))
		/* We don't support async requests at the moment */
		return;

	reg = readl_relaxed(scmi_channel.shmem + SCMI_SHM_MESSAGE_PAYLOAD +
			    SCMI_PD_STATE_SET_DOMAIN_ID);

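	/* Find the registered power domain matching the requested domain ID */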
	for (i = 0; i < MAX_POWER_DOMAINS; i++) {
		if (!scmi_power_domains[i].pd)
			break;

		if (reg == scmi_power_domains[i].pd->arm_scmi.domain_id) {
			scmi_pd = &scmi_power_domains[i];
			break;
		}
	}
	if (!scmi_pd)
		goto out_forward_smc;

	reg = readl_relaxed(scmi_channel.shmem + SCMI_SHM_MESSAGE_PAYLOAD +
			    SCMI_PD_STATE_SET_POWER_STATE);
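	/*
	 * Notify the ops around the actual transition: power_on() is only
	 * called once EL3 confirms the domain is on, while power_off() is
	 * called beforehand and rolled back if the command fails.
	 */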
	switch (reg) {
	case SCMI_PD_POWER_ON:
		if (scmi_run_command(host_ctxt))
			break;

		scmi_pd->ops->power_on(scmi_pd->pd);
		break;
	case SCMI_PD_POWER_OFF:
		scmi_pd->ops->power_off(scmi_pd->pd);

		if (scmi_run_command(host_ctxt))
			scmi_pd->ops->power_on(scmi_pd->pd);
		break;
	}
	return;

out_forward_smc:
	__kvm_hyp_host_forward_smc(host_ctxt);
}

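/*
 * Filter host SMCs: return true if this was a doorbell ring for our SCMI
 * channel (now handled), false to let the caller forward the SMC as usual.
 */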
bool kvm_host_scmi_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);

	if (!scmi_channel.shmem || func_id != scmi_channel.smc_id)
		return false; /* Unhandled */

	/*
	 * Prevent the host from modifying the request while it is in flight.
	 * One page is enough, SCMI messages are smaller than that.
	 *
	 * FIXME: the host is allowed to poll the shmem while the request is in
	 * flight, or read shmem when receiving the SCMI interrupt. Although
	 * it's unlikely with the SMC-based transport, this too requires some
	 * tightening in the spec.
	 */
	if (WARN_ON(__pkvm_host_donate_hyp(scmi_channel.shmem_pfn, 1)))
		return true;

	__kvm_host_scmi_handler(host_ctxt);

	WARN_ON(__pkvm_hyp_donate_host(scmi_channel.shmem_pfn, 1));
	return true; /* Handled */
}

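/*
 * Register a power domain controlled through SCMI. The first caller also sets
 * up the channel; subsequent domains must use the same shmem and SMC function
 * ID, since we support a single channel at the moment.
 */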
int pkvm_init_scmi_pd(struct kvm_power_domain *pd,
		      const struct kvm_power_domain_ops *ops)
{
	int ret;

	if (!IS_ALIGNED(pd->arm_scmi.shmem_base, PAGE_SIZE) ||
	    pd->arm_scmi.shmem_size < PAGE_SIZE) {
		return -EINVAL;
	}

	if (!scmi_channel.shmem) {
		unsigned long shmem;

		/* FIXME: Do we need to mark those pages shared in the host s2? */
		ret = __pkvm_create_private_mapping(pd->arm_scmi.shmem_base,
						    pd->arm_scmi.shmem_size,
						    PAGE_HYP_DEVICE,
						    &shmem);
		if (ret)
			return ret;

		scmi_channel.smc_id = pd->arm_scmi.smc_id;
		scmi_channel.shmem_pfn = hyp_phys_to_pfn(pd->arm_scmi.shmem_base);
		scmi_channel.shmem = (void *)shmem;

	} else if (scmi_channel.shmem_pfn !=
		   hyp_phys_to_pfn(pd->arm_scmi.shmem_base) ||
		   scmi_channel.smc_id != pd->arm_scmi.smc_id) {
		/* We support a single channel at the moment */
		return -ENXIO;
	}

	if (scmi_power_domain_count == MAX_POWER_DOMAINS)
		return -ENOSPC;

	scmi_power_domains[scmi_power_domain_count].pd = pd;
	scmi_power_domains[scmi_power_domain_count].ops = ops;
	scmi_power_domain_count++;
	return 0;
}
232