1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2015, The Linux Foundation. All rights reserved.
3 */
4
5 #include <linux/io.h>
6 #include <linux/errno.h>
7 #include <linux/delay.h>
8 #include <linux/mutex.h>
9 #include <linux/slab.h>
10 #include <linux/types.h>
11 #include <linux/qcom_scm.h>
12 #include <linux/arm-smccc.h>
13 #include <linux/dma-mapping.h>
14
15 #include "qcom_scm.h"
16
/* Pack an SCM service id and command id into a single function id. */
#define QCOM_SCM_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))

#define MAX_QCOM_SCM_ARGS 10
#define MAX_QCOM_SCM_RETS 3

/*
 * Per-argument type tags encoded into the arginfo word, two bits per
 * argument (see QCOM_SCM_ARGS_IMPL below).
 */
enum qcom_scm_arg_types {
	QCOM_SCM_VAL,
	QCOM_SCM_RO,
	QCOM_SCM_RW,
	QCOM_SCM_BUFVAL,
};

/*
 * Build the arginfo word: the low nibble holds the argument count and
 * each argument's 2-bit type field follows, starting at bit 4.
 */
#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
	(((a) & 0x3) << 4) | \
	(((b) & 0x3) << 6) | \
	(((c) & 0x3) << 8) | \
	(((d) & 0x3) << 10) | \
	(((e) & 0x3) << 12) | \
	(((f) & 0x3) << 14) | \
	(((g) & 0x3) << 16) | \
	(((h) & 0x3) << 18) | \
	(((i) & 0x3) << 20) | \
	(((j) & 0x3) << 22) | \
	((num) & 0xf))

/* Unspecified trailing types default to 0, i.e. QCOM_SCM_VAL. */
#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
43
44 /**
45 * struct qcom_scm_desc
46 * @arginfo: Metadata describing the arguments in args[]
47 * @args: The array of arguments for the secure syscall
48 * @res: The values returned by the secure syscall
49 */
50 struct qcom_scm_desc {
51 u32 arginfo;
52 u64 args[MAX_QCOM_SCM_ARGS];
53 };
54
/*
 * Detected SMCCC calling convention (ARM_SMCCC_SMC_32/_64); set once by
 * __qcom_scm_init(). Starts as -1, meaning "not probed yet".
 */
static u64 qcom_smccc_convention = -1;
/* Serializes non-atomic (standard) SCM calls. */
static DEFINE_MUTEX(qcom_scm_lock);

/* Backoff used when the firmware reports QCOM_SCM_V2_EBUSY. */
#define QCOM_SCM_EBUSY_WAIT_MS 30
#define QCOM_SCM_EBUSY_MAX_RETRY 20

/*
 * Only the first three args fit in registers; any further args are
 * passed through an external buffer whose address goes in x5.
 */
#define N_EXT_QCOM_SCM_ARGS 7
#define FIRST_EXT_ARG_IDX 3
#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
64
/*
 * __qcom_scm_call_do() - Issue one SMC of the given @type and retry
 * transparently while the secure world reports QCOM_SCM_INTERRUPTED.
 *
 * The ARM_SMCCC_QUIRK_QCOM_A6 quirk carries register a6 across calls so
 * the retried call resumes with the firmware-provided state
 * (NOTE(review): a6 semantics come from the quirk implementation in
 * arm-smccc — confirm there if this path is modified).
 */
static void __qcom_scm_call_do(const struct qcom_scm_desc *desc,
			       struct arm_smccc_res *res, u32 fn_id,
			       u64 x5, u32 type)
{
	u64 cmd;
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

	cmd = ARM_SMCCC_CALL_VAL(type, qcom_smccc_convention,
				 ARM_SMCCC_OWNER_SIP, fn_id);

	quirk.state.a6 = 0;

	do {
		arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0],
				    desc->args[1], desc->args[2], x5,
				    quirk.state.a6, 0, res, &quirk);

		/* On interruption, resume using the value returned in a0
		 * as the command for the next attempt. */
		if (res->a0 == QCOM_SCM_INTERRUPTED)
			cmd = res->a0;

	} while (res->a0 == QCOM_SCM_INTERRUPTED);
}
87
/*
 * qcom_scm_call_do() - Dispatch an SCM call either atomically or as a
 * serialized standard call.
 *
 * Atomic callers get a single FAST call with no locking or sleeping.
 * Non-atomic callers take qcom_scm_lock around each STD call and, when
 * the firmware answers QCOM_SCM_V2_EBUSY, back off for
 * QCOM_SCM_EBUSY_WAIT_MS and retry up to QCOM_SCM_EBUSY_MAX_RETRY times.
 */
static void qcom_scm_call_do(const struct qcom_scm_desc *desc,
			     struct arm_smccc_res *res, u32 fn_id,
			     u64 x5, bool atomic)
{
	int retry_count = 0;

	if (atomic) {
		__qcom_scm_call_do(desc, res, fn_id, x5, ARM_SMCCC_FAST_CALL);
		return;
	}

	do {
		mutex_lock(&qcom_scm_lock);

		__qcom_scm_call_do(desc, res, fn_id, x5,
				   ARM_SMCCC_STD_CALL);

		mutex_unlock(&qcom_scm_lock);

		if (res->a0 == QCOM_SCM_V2_EBUSY) {
			if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
				break;
			msleep(QCOM_SCM_EBUSY_WAIT_MS);
		}
	} while (res->a0 == QCOM_SCM_V2_EBUSY);
}
114
/*
 * ___qcom_scm_call() - Marshal descriptor arguments and perform the call.
 * @dev: device used for DMA-mapping the extra-argument buffer
 * @svc_id/@cmd_id: combined into the SCM function id
 * @desc: arguments and arginfo for the call
 * @res: filled with the raw SMC result registers
 * @atomic: selects GFP_ATOMIC allocation and the atomic call path
 *
 * The first three arguments travel in registers. When the descriptor
 * declares more than N_REGISTER_ARGS arguments, the remainder are copied
 * into a kernel buffer (32-bit slots for SMC32, 64-bit for SMC64) whose
 * DMA address is passed in x5 instead of args[3].
 *
 * Return: 0 on success, -ENOMEM on allocation/mapping failure, or a
 * remapped firmware error when a0 is negative.
 */
static int ___qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
			    const struct qcom_scm_desc *desc,
			    struct arm_smccc_res *res, bool atomic)
{
	int arglen = desc->arginfo & 0xf;	/* low nibble = arg count */
	int i;
	u32 fn_id = QCOM_SCM_FNID(svc_id, cmd_id);
	u64 x5 = desc->args[FIRST_EXT_ARG_IDX];
	dma_addr_t args_phys = 0;
	void *args_virt = NULL;
	size_t alloc_len;
	gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;

	if (unlikely(arglen > N_REGISTER_ARGS)) {
		alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64);
		args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);

		if (!args_virt)
			return -ENOMEM;

		/* Buffer slot width depends on the calling convention. */
		if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
			__le32 *args = args_virt;

			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
				args[i] = cpu_to_le32(desc->args[i +
						      FIRST_EXT_ARG_IDX]);
		} else {
			__le64 *args = args_virt;

			for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
				args[i] = cpu_to_le64(desc->args[i +
						      FIRST_EXT_ARG_IDX]);
		}

		args_phys = dma_map_single(dev, args_virt, alloc_len,
					   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, args_phys)) {
			kfree(args_virt);
			return -ENOMEM;
		}

		x5 = args_phys;
	}

	qcom_scm_call_do(desc, res, fn_id, x5, atomic);

	if (args_virt) {
		dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
		kfree(args_virt);
	}

	/* a0 carries the firmware status word; negative means failure. */
	if ((long)res->a0 < 0)
		return qcom_scm_remap_error(res->a0);

	return 0;
}
172
173 /**
174 * qcom_scm_call() - Invoke a syscall in the secure world
175 * @dev: device
176 * @svc_id: service identifier
177 * @cmd_id: command identifier
178 * @desc: Descriptor structure containing arguments and return values
179 *
180 * Sends a command to the SCM and waits for the command to finish processing.
181 * This should *only* be called in pre-emptible context.
182 */
qcom_scm_call(struct device * dev,u32 svc_id,u32 cmd_id,const struct qcom_scm_desc * desc,struct arm_smccc_res * res)183 static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
184 const struct qcom_scm_desc *desc,
185 struct arm_smccc_res *res)
186 {
187 might_sleep();
188 return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, false);
189 }
190
191 /**
192 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
193 * @dev: device
194 * @svc_id: service identifier
195 * @cmd_id: command identifier
196 * @desc: Descriptor structure containing arguments and return values
197 * @res: Structure containing results from SMC/HVC call
198 *
199 * Sends a command to the SCM and waits for the command to finish processing.
200 * This can be called in atomic context.
201 */
qcom_scm_call_atomic(struct device * dev,u32 svc_id,u32 cmd_id,const struct qcom_scm_desc * desc,struct arm_smccc_res * res)202 static int qcom_scm_call_atomic(struct device *dev, u32 svc_id, u32 cmd_id,
203 const struct qcom_scm_desc *desc,
204 struct arm_smccc_res *res)
205 {
206 return ___qcom_scm_call(dev, svc_id, cmd_id, desc, res, true);
207 }
208
209 /**
210 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
211 * @entry: Entry point function for the cpus
212 * @cpus: The cpumask of cpus that will use the entry point
213 *
214 * Set the cold boot address of the cpus. Any cpu outside the supported
215 * range would be removed from the cpu present mask.
216 */
__qcom_scm_set_cold_boot_addr(void * entry,const cpumask_t * cpus)217 int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
218 {
219 return -ENOTSUPP;
220 }
221
222 /**
223 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
224 * @dev: Device pointer
225 * @entry: Entry point function for the cpus
226 * @cpus: The cpumask of cpus that will use the entry point
227 *
228 * Set the Linux entry point for the SCM to transfer control to when coming
229 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
230 */
__qcom_scm_set_warm_boot_addr(struct device * dev,void * entry,const cpumask_t * cpus)231 int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
232 const cpumask_t *cpus)
233 {
234 return -ENOTSUPP;
235 }
236
237 /**
238 * qcom_scm_cpu_power_down() - Power down the cpu
239 * @flags - Flags to flush cache
240 *
241 * This is an end point to power down cpu. If there was a pending interrupt,
242 * the control would return from this function, otherwise, the cpu jumps to the
243 * warm boot entry point set for this cpu upon reset.
244 */
__qcom_scm_cpu_power_down(u32 flags)245 void __qcom_scm_cpu_power_down(u32 flags)
246 {
247 }
248
/*
 * Ask the secure world whether the SCM function identified by
 * @svc_id/@cmd_id is implemented. Returns a negative errno on transport
 * failure, otherwise the firmware's availability answer (res.a1).
 */
int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(1),
		.args = {
			QCOM_SCM_FNID(svc_id, cmd_id) |
			(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		},
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
				&desc, &res);

	return ret ? : res.a1;
}
264
/*
 * __qcom_scm_hdcp_req() - Forward a batch of HDCP register writes to the
 * secure world. The firmware's response word is stored in *resp.
 *
 * NOTE(review): all QCOM_SCM_HDCP_MAX_REQ_CNT request slots (req[0..4])
 * are read unconditionally below, so callers presumably must always pass
 * a fully populated array even when @req_cnt is smaller — confirm against
 * the callers before relying on partial arrays.
 */
int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
			u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {0};
	struct arm_smccc_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	/* Flatten the addr/val pairs into the 10 descriptor arguments. */
	desc.args[0] = req[0].addr;
	desc.args[1] = req[0].val;
	desc.args[2] = req[1].addr;
	desc.args[3] = req[1].val;
	desc.args[4] = req[2].addr;
	desc.args[5] = req[2].val;
	desc.args[6] = req[3].addr;
	desc.args[7] = req[3].val;
	desc.args[8] = req[4].addr;
	desc.args[9] = req[4].val;
	desc.arginfo = QCOM_SCM_ARGS(10);

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc,
			    &res);
	*resp = res.a1;

	return ret;
}
293
/*
 * __qcom_scm_init() - Probe which SMC calling convention the firmware
 * supports and record it in qcom_smccc_convention.
 *
 * Issues an SMC64 fast call to the "is call available" service, asking
 * about that same function id (with the call-type bit cleared in the
 * probe argument). Success (a0 == 0) with a non-zero answer selects
 * SMC64; anything else falls back to SMC32.
 */
void __qcom_scm_init(void)
{
	u64 cmd;
	struct arm_smccc_res res;
	u32 function = QCOM_SCM_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD);

	/* First try a SMC64 call */
	cmd = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_64,
				 ARM_SMCCC_OWNER_SIP, function);

	arm_smccc_smc(cmd, QCOM_SCM_ARGS(1), cmd & (~BIT(ARM_SMCCC_TYPE_SHIFT)),
		      0, 0, 0, 0, 0, &res);

	if (!res.a0 && res.a1)
		qcom_smccc_convention = ARM_SMCCC_SMC_64;
	else
		qcom_smccc_convention = ARM_SMCCC_SMC_32;
}
312
__qcom_scm_pas_supported(struct device * dev,u32 peripheral)313 bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
314 {
315 int ret;
316 struct qcom_scm_desc desc = {0};
317 struct arm_smccc_res res;
318
319 desc.args[0] = peripheral;
320 desc.arginfo = QCOM_SCM_ARGS(1);
321
322 ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
323 QCOM_SCM_PAS_IS_SUPPORTED_CMD,
324 &desc, &res);
325
326 return ret ? false : !!res.a1;
327 }
328
/*
 * Hand the DMA address of an authentication metadata blob to the PAS
 * image-init command. Returns a negative errno or the firmware status.
 */
int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
			      dma_addr_t metadata_phys)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args = { peripheral, metadata_phys },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
				QCOM_SCM_PAS_INIT_IMAGE_CMD, &desc, &res);

	return ret ? : res.a1;
}
345
/*
 * Describe the memory region [addr, addr + size) that the peripheral's
 * firmware will be loaded into. Returns errno or firmware status.
 */
int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
			     phys_addr_t addr, phys_addr_t size)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(3),
		.args = { peripheral, addr, size },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
				QCOM_SCM_PAS_MEM_SETUP_CMD, &desc, &res);

	return ret ? : res.a1;
}
363
/*
 * Ask the secure world to authenticate the loaded firmware image and
 * release the peripheral from reset. Returns errno or firmware status.
 */
int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(1),
		.args = { peripheral },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
				QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
				&desc, &res);

	return ret ? : res.a1;
}
379
/*
 * Shut down the given peripheral via PAS. Returns a negative errno on
 * call failure, otherwise the firmware status from a1.
 */
int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(1),
		.args = { peripheral },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
				QCOM_SCM_PAS_SHUTDOWN_CMD, &desc, &res);

	return ret ? : res.a1;
}
394
/*
 * Assert (@reset true) or deassert the MSS reset line through the PAS
 * service. Returns a negative errno or the firmware status word.
 */
int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(2),
		.args = { reset, 0 },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
				&desc, &res);

	return ret ? : res.a1;
}
410
/*
 * Inform the secure world of a remote processor state change.
 * Returns a negative errno or the firmware status word.
 */
int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(2),
		.args = { state, id },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT,
				QCOM_SCM_SET_REMOTE_STATE, &desc, &res);

	return ret ? : res.a1;
}
426
/*
 * Request reassignment of a memory region's ownership. @src and @dest
 * point to caller-prepared permission descriptors of @src_sz/@dest_sz
 * bytes. Returns a negative errno or the firmware status word.
 */
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
			  size_t mem_sz, phys_addr_t src, size_t src_sz,
			  phys_addr_t dest, size_t dest_sz)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args = {
			mem_region, mem_sz,
			src, src_sz,
			dest, dest_sz,
			0,
		},
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
				QCOM_MEM_PROT_ASSIGN_ID,
				&desc, &res);

	return ret ? : res.a1;
}
453
/*
 * Ask the secure world to restore the security configuration of the
 * given device. Returns a negative errno or the firmware status word.
 */
int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id, u32 spare)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(2),
		.args = { device_id, spare },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP, QCOM_SCM_RESTORE_SEC_CFG,
				&desc, &res);

	return ret ? : res.a1;
}
469
/*
 * Query how much memory the secure IOMMU page tables require; the size
 * (res.a1) is stored through @size when non-NULL. Returns a negative
 * errno on call failure, otherwise the firmware status from a2.
 */
int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
				      size_t *size)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(1),
		.args = { spare },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
				QCOM_SCM_IOMMU_SECURE_PTBL_SIZE, &desc, &res);

	/* Matches the original behavior: *size is written even when the
	 * call itself failed. */
	if (size)
		*size = res.a1;

	return ret ? : res.a2;
}
488
/*
 * Hand the secure world a buffer at @addr of @size bytes for the secure
 * IOMMU page tables. -EPERM from the firmware means the tables were
 * already set up and is deliberately treated as success.
 */
int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
				      u32 spare)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args = { addr, size, spare },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_MP,
				QCOM_SCM_IOMMU_SECURE_PTBL_INIT, &desc, &res);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
511
/*
 * Enable or disable firmware download (crash-dump) mode via the boot
 * service. Returns the qcom_scm_call() result.
 */
int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(2),
		.args = {
			QCOM_SCM_SET_DLOAD_MODE,
			enable ? QCOM_SCM_SET_DLOAD_MODE : 0,
		},
	};

	return qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
			     &desc, &res);
}
524
/*
 * Read a 32-bit value from physical address @addr through the secure
 * world; the value (res.a1) is stored in *val on success. Returns 0 on
 * success or a negative errno.
 */
int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
			unsigned int *val)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(1),
		.args = { addr },
	};
	int ret = qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ,
				&desc, &res);

	if (ret >= 0)
		*val = res.a1;

	return ret < 0 ? ret : 0;
}
542
/*
 * Write the 32-bit value @val to physical address @addr through the
 * secure world. Returns the qcom_scm_call() result.
 */
int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(2),
		.args = { addr, val },
	};

	return qcom_scm_call(dev, QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
			     &desc, &res);
}
555
/*
 * Toggle the QSMMU-500 wait-for-safe erratum workaround for all clients.
 * Uses the atomic call path, so it is callable from non-sleepable
 * context. Returns the qcom_scm_call_atomic() result.
 */
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool en)
{
	struct arm_smccc_res res;
	struct qcom_scm_desc desc = {
		.arginfo = QCOM_SCM_ARGS(2),
		.args = { QCOM_SCM_CONFIG_ERRATA1_CLIENT_ALL, en },
	};

	return qcom_scm_call_atomic(dev, QCOM_SCM_SVC_SMMU_PROGRAM,
				    QCOM_SCM_CONFIG_ERRATA1, &desc, &res);
}
568