// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_wb_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_wb_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static const char *qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * The device is not required here: with only a single value argument
	 * there is no buffer to dma_map_single() for the secure world.
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
	};

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
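
/*
 * Illustrative sketch (not part of this driver): platform power management
 * code, such as the qcom SPM driver, points the secure monitor at the
 * kernel's resume handler before any core is powered down. cpu_resume_arm
 * is the generic ARM resume entry; the surrounding function is hypothetical.
 *
 *	static int my_pm_init(void)
 *	{
 *		int ret;
 *
 *		ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm,
 *						  cpu_possible_mask);
 *		if (ret)
 *			pr_err("failed to set warm boot address: %d\n", ret);
 *		return ret;
 *	}
 */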

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
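
/*
 * Illustrative sketch (not part of this driver): 32-bit Qualcomm SMP code
 * registers the kernel's secondary entry point before bringing CPUs online.
 * secondary_startup_arm is the standard ARM secondary entry symbol; the
 * surrounding callback is hypothetical.
 *
 *	static void __init my_smp_prepare_cpus(unsigned int max_cpus)
 *	{
 *		if (qcom_scm_set_cold_boot_addr(secondary_startup_arm,
 *						cpu_present_mask))
 *			pr_warn("cold boot address not set, SMP may fail\n");
 *	}
 */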

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is the terminal call to power down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
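
/*
 * Illustrative sketch (not part of this driver): a cpuidle enter callback
 * typically pairs the warm boot address set above with this terminate-PC
 * call. QCOM_SCM_CPU_PWR_DOWN_L2_ON is assumed to be the "keep L2 on" flush
 * flag from <linux/qcom_scm.h>; the callback itself is hypothetical.
 *
 *	static int my_idle_enter(struct cpuidle_device *dev,
 *				 struct cpuidle_driver *drv, int index)
 *	{
 *		qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 *		return index;
 *	}
 */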

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
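
/*
 * Illustrative sketch (not part of this driver): a remoteproc driver would
 * normally hand the firmware's .mdt metadata to PAS before loading segments.
 * MY_PAS_ID and the firmware name are hypothetical placeholders.
 *
 *	const struct firmware *fw;
 *	int ret;
 *
 *	ret = request_firmware(&fw, "my_peripheral.mdt", dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = qcom_scm_pas_init_image(MY_PAS_ID, fw->data, fw->size);
 *	release_firmware(fw);
 */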

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);
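
/*
 * Illustrative sketch (not part of this driver): the PAS calls above are
 * meant to be used as a sequence when booting a remote processor. The
 * addresses, sizes and MY_PAS_ID below are hypothetical.
 *
 *	ret = qcom_scm_pas_init_image(MY_PAS_ID, mdt, mdt_size);
 *	if (ret)
 *		return ret;
 *
 *	ret = qcom_scm_pas_mem_setup(MY_PAS_ID, mem_phys, mem_size);
 *	if (ret)
 *		return ret;
 *
 *	// ...copy firmware segments into the prepared region...
 *
 *	ret = qcom_scm_pas_auth_and_reset(MY_PAS_ID);
 *	if (ret)
 *		return ret;
 *
 *	// and on teardown:
 *	qcom_scm_pas_shutdown(MY_PAS_ID);
 */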

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
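
/*
 * Illustrative sketch (not part of this driver): these accessors let drivers
 * touch registers that are only writable from the secure world. A
 * read-modify-write of a hypothetical register at MY_SEC_REG_PHYS with a
 * hypothetical MY_ENABLE_BIT might look like:
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = qcom_scm_io_readl(MY_SEC_REG_PHYS, &val);
 *	if (ret)
 *		return ret;
 *
 *	val |= MY_ENABLE_BIT;
 *	ret = qcom_scm_io_writel(MY_SEC_REG_PHYS, val);
 */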

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	if (ret)
		return ret;

	if (size)
		*size = res.result[0];

	return res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
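
/*
 * Illustrative sketch (not part of this driver): the two page-table calls
 * above pair up in an IOMMU probe path, roughly as below. The allocation
 * strategy here is an assumption; real consumers may allocate pages
 * differently.
 *
 *	size_t psize;
 *	void *cpu_addr;
 *	dma_addr_t paddr;
 *	int ret;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	if (ret)
 *		return ret;
 *
 *	cpu_addr = dma_alloc_coherent(dev, psize, &paddr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
 */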

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmap for the current set of owners; each set bit
 *            indicates a unique owner
 * @newvm:    array having the new owners and their corresponding
 *            permission flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure or 0 on success, with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
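
/*
 * Illustrative sketch (not part of this driver): handing a buffer to a
 * remote VM. QCOM_SCM_VMID_HLOS, QCOM_SCM_VMID_MSS_MSA and QCOM_SCM_PERM_RW
 * are assumed to come from <linux/qcom_scm.h>; the buffer is hypothetical.
 *
 *	unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm next = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(buf_phys, buf_size, &perms, &next, 1);
 *	// on success, "perms" now holds BIT(QCOM_SCM_VMID_MSS_MSA)
 */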

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
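
/*
 * Illustrative sketch (not part of this driver): the OCMEM driver brackets a
 * client's use of an on-chip memory range with lock/unlock. The offset and
 * size are hypothetical, and mode 0 is assumed to mean wide access per the
 * OCMEM driver's conventions.
 *
 *	if (qcom_scm_ocmem_lock_available()) {
 *		ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
 *					  0, SZ_256K, 0);
 *		// ...use the region...
 *		qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, SZ_256K);
 *	}
 */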

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI standard defines a standard way to do this, but it doesn't work on
 * these SoCs; only this SCM call does.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS I/O requests inline.
 *
 * The UFSHCI standard defines a standard way to do this, but it doesn't work on
 * these SoCs; only this SCM call does.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed. The sanctioned way to
	 * do this is by using the DMA API. But as is best practice for crypto
	 * keys, we also must wipe the key after use. This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);
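
/*
 * Illustrative sketch (not part of this driver): a UFS host driver programs
 * a key for a keyslot and later evicts it. The slot number and key buffer
 * are hypothetical; 64 bytes and a data unit size of 8 (4096 bytes) match
 * a typical AES-256-XTS configuration.
 *
 *	err = qcom_scm_ice_set_key(slot, key_bytes, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	// ...issue inline-encrypted I/O...
 *	err = qcom_scm_ice_invalidate_key(slot);
 */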

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);
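
/*
 * Illustrative sketch (not part of this driver): since the SCM device may
 * probe after its consumers, callers usually gate their own probe on it:
 *
 *	static int my_driver_probe(struct platform_device *pdev)
 *	{
 *		if (!qcom_scm_is_available())
 *			return -EPROBE_DEFER;
 *		// ...safe to make qcom_scm_*() calls from here on...
 *	}
 */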

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__get_convention();

	/*
	 * If requested, enable "download mode": from this point on, a warm
	 * boot will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);