// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_wb_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_wb_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static const char *qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

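/*
 * Enable the optional core/iface/bus clocks around SCM calls on platforms
 * that need them. Clocks that were not provided are left as NULL in probe,
 * which the clk API treats as a no-op dummy clock.
 */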
static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

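/*
 * Probe which SMC calling convention the firmware implements by issuing the
 * QCOM_SCM_INFO_IS_CALL_AVAIL query against itself with each convention in
 * turn. The result is cached in qcom_scm_convention so the probe only runs
 * once.
 */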
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit; otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * A device isn't required as there is only one argument - no device
	 * is needed to dma_map_single to the secure world.
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmware didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on that firmware. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variant of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed during cpuidle or
 * hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
	};

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range will be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is the terminating call to power down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

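/**
 * qcom_scm_set_remote_state() - Request a state change for a remote processor
 * @state: requested state (interpretation is firmware-defined)
 * @id:    identifier of the remote (interpretation is firmware-defined)
 *
 * Returns a negative errno on failure, otherwise the result reported by the
 * firmware.
 */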
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

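/*
 * Toggle "download mode" through the dedicated SCM call.
 * qcom_scm_set_download_mode() below falls back to setting the TCSR
 * download-mode register through qcom_scm_io_writel() when this call
 * is not implemented by the firmware.
 */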
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

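/* Single-line reset controller backed by the PAS MSS reset call above. */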
static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

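/**
 * qcom_scm_io_readl() - Read a 32-bit value from a physical address through
 *			 the secure world
 * @addr:	physical address to read from
 * @val:	filled with the value read on success
 *
 * Returns 0 on success, a negative errno otherwise.
 */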
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

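/**
 * qcom_scm_io_writel() - Write a 32-bit value to a physical address through
 *			  the secure world
 * @addr:	physical address to write to
 * @val:	value to write
 *
 * Returns 0 on success, a negative errno otherwise.
 */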
int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

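/**
 * qcom_scm_restore_sec_cfg() - Restore a device's secure configuration
 * @device_id:	identifier of the device to restore (firmware-defined)
 * @spare:	spare argument, normally 0 (reserved by the interface)
 *
 * Returns a negative errno on failure, otherwise the result reported by the
 * firmware.
 */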
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

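/**
 * qcom_scm_iommu_secure_ptbl_size() - Get the size required for the secure
 *				       IOMMU page tables
 * @spare:	spare argument, normally 0 (reserved by the interface)
 * @size:	filled with the required size in bytes
 *
 * Returns a negative errno on failure, otherwise the status reported by the
 * firmware.
 */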
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

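/**
 * qcom_scm_iommu_secure_ptbl_init() - Hand a memory region to the secure
 *				       world for use as IOMMU page tables
 * @addr:	physical address of the region
 * @size:	size of the region in bytes
 * @spare:	spare argument, normally 0 (reserved by the interface)
 *
 * Returns 0 on success, a negative errno otherwise.
 */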
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

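/**
 * qcom_scm_mem_protect_video_var() - Apply memory protection to video regions
 * @cp_start:		start of the protected pixel region
 * @cp_size:		size of the protected pixel region
 * @cp_nonpixel_start:	start of the protected non-pixel region
 * @cp_nonpixel_size:	size of the protected non-pixel region
 *
 * The exact semantics of the regions are firmware-defined; "cp" presumably
 * stands for content protection. Returns a negative errno on failure,
 * otherwise the result reported by the firmware.
 */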
int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

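/*
 * Low-level QCOM_SCM_MP_ASSIGN call. The source vmid list, memory map and
 * destination permission list are passed as physically contiguous buffers;
 * qcom_scm_assign_mem() below builds all three in a single coherent
 * allocation.
 */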
static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for the current set of owners, each set bit in
 *            the flag indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of the next vmid set */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed.  The sanctioned way to
	 * do this is by using the DMA API.  But as is best practice for crypto
	 * keys, we also must wipe the key after use.  This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

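/**
 * qcom_scm_qsmmu500_wait_safe_toggle() - Toggle the wait-for-safe logic on
 *					  the QSMMU-500
 * @en:	true to enable, false to disable wait-for-safe for all clients
 *
 * Used as part of an SMMU-500 errata workaround (the firmware call is named
 * QCOM_SCM_SMMU_CONFIG_ERRATA1). Returns 0 on success, a negative errno
 * otherwise.
 */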
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

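/*
 * Resolve the address used to toggle download mode from the optional
 * "qcom,dload-mode" property, a <phandle offset> pair pointing into the
 * TCSR block. Leaves *addr untouched if the property is absent.
 */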
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__get_convention();

	/*
	 * If requested, enable "download mode"; from this point on a warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");