1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Qualcomm self-authenticating modem subsystem remoteproc driver
4  *
5  * Copyright (C) 2016 Linaro Ltd.
6  * Copyright (C) 2014 Sony Mobile Communications AB
7  * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8  */
9 
10 #include <linux/clk.h>
11 #include <linux/delay.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of_address.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_runtime.h>
22 #include <linux/regmap.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/remoteproc.h>
25 #include <linux/reset.h>
26 #include <linux/soc/qcom/mdt_loader.h>
27 #include <linux/iopoll.h>
28 
29 #include "remoteproc_internal.h"
30 #include "qcom_common.h"
31 #include "qcom_q6v5.h"
32 
33 #include <linux/qcom_scm.h>
34 
35 #define MPSS_CRASH_REASON_SMEM		421
36 
37 /* RMB Status Register Values */
38 #define RMB_PBL_SUCCESS			0x1
39 
40 #define RMB_MBA_XPU_UNLOCKED		0x1
41 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
42 #define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
43 #define RMB_MBA_AUTH_COMPLETE		0x4
44 
45 /* PBL/MBA interface registers */
46 #define RMB_MBA_IMAGE_REG		0x00
47 #define RMB_PBL_STATUS_REG		0x04
48 #define RMB_MBA_COMMAND_REG		0x08
49 #define RMB_MBA_STATUS_REG		0x0C
50 #define RMB_PMI_META_DATA_REG		0x10
51 #define RMB_PMI_CODE_START_REG		0x14
52 #define RMB_PMI_CODE_LENGTH_REG		0x18
53 #define RMB_MBA_MSS_STATUS		0x40
54 #define RMB_MBA_ALT_RESET		0x44
55 
56 #define RMB_CMD_META_DATA_READY		0x1
57 #define RMB_CMD_LOAD_READY		0x2
58 
59 /* QDSP6SS Register Offsets */
60 #define QDSP6SS_RESET_REG		0x014
61 #define QDSP6SS_GFMUX_CTL_REG		0x020
62 #define QDSP6SS_PWR_CTL_REG		0x030
63 #define QDSP6SS_MEM_PWR_CTL		0x0B0
64 #define QDSP6SS_STRAP_ACC		0x110
65 
66 /* AXI Halt Register Offsets */
67 #define AXI_HALTREQ_REG			0x0
68 #define AXI_HALTACK_REG			0x4
69 #define AXI_IDLE_REG			0x8
70 
71 #define HALT_ACK_TIMEOUT_MS		100
72 
73 /* QDSP6SS_RESET */
74 #define Q6SS_STOP_CORE			BIT(0)
75 #define Q6SS_CORE_ARES			BIT(1)
76 #define Q6SS_BUS_ARES_ENABLE		BIT(2)
77 
78 /* QDSP6SS_GFMUX_CTL */
79 #define Q6SS_CLK_ENABLE			BIT(1)
80 
81 /* QDSP6SS_PWR_CTL */
82 #define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
83 #define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
84 #define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
85 #define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
86 #define Q6SS_ETB_SLP_NRET_N		BIT(17)
87 #define Q6SS_L2DATA_STBY_N		BIT(18)
88 #define Q6SS_SLP_RET_N			BIT(19)
89 #define Q6SS_CLAMP_IO			BIT(20)
90 #define QDSS_BHS_ON			BIT(21)
91 #define QDSS_LDO_BYP			BIT(22)
92 
93 /* QDSP6v56 parameters */
94 #define QDSP6v56_LDO_BYP		BIT(25)
95 #define QDSP6v56_BHS_ON		BIT(24)
96 #define QDSP6v56_CLAMP_WL		BIT(21)
97 #define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
98 #define HALT_CHECK_MAX_LOOPS		200
99 #define QDSP6SS_XO_CBCR		0x0038
100 #define QDSP6SS_ACC_OVERRIDE_VAL		0x20
101 
102 /* QDSP6v65 parameters */
103 #define QDSP6SS_SLEEP                   0x3C
104 #define QDSP6SS_BOOT_CORE_START         0x400
105 #define QDSP6SS_BOOT_CMD                0x404
106 #define SLEEP_CHECK_MAX_LOOPS           200
107 #define BOOT_FSM_TIMEOUT                10000
108 
109 struct reg_info {
110 	struct regulator *reg;
111 	int uV;
112 	int uA;
113 };
114 
115 struct qcom_mss_reg_res {
116 	const char *supply;
117 	int uV;
118 	int uA;
119 };
120 
121 struct rproc_hexagon_res {
122 	const char *hexagon_mba_image;
123 	struct qcom_mss_reg_res *proxy_supply;
124 	struct qcom_mss_reg_res *active_supply;
125 	char **proxy_clk_names;
126 	char **reset_clk_names;
127 	char **active_clk_names;
128 	char **active_pd_names;
129 	char **proxy_pd_names;
130 	int version;
131 	bool need_mem_protection;
132 	bool has_alt_reset;
133 };
134 
135 struct q6v5 {
136 	struct device *dev;
137 	struct rproc *rproc;
138 
139 	void __iomem *reg_base;
140 	void __iomem *rmb_base;
141 
142 	struct regmap *halt_map;
143 	u32 halt_q6;
144 	u32 halt_modem;
145 	u32 halt_nc;
146 
147 	struct reset_control *mss_restart;
148 	struct reset_control *pdc_reset;
149 
150 	struct qcom_q6v5 q6v5;
151 
152 	struct clk *active_clks[8];
153 	struct clk *reset_clks[4];
154 	struct clk *proxy_clks[4];
155 	struct device *active_pds[1];
156 	struct device *proxy_pds[3];
157 	int active_clk_count;
158 	int reset_clk_count;
159 	int proxy_clk_count;
160 	int active_pd_count;
161 	int proxy_pd_count;
162 
163 	struct reg_info active_regs[1];
164 	struct reg_info proxy_regs[3];
165 	int active_reg_count;
166 	int proxy_reg_count;
167 
168 	bool running;
169 
170 	bool dump_mba_loaded;
171 	unsigned long dump_segment_mask;
172 	unsigned long dump_complete_mask;
173 
174 	phys_addr_t mba_phys;
175 	void *mba_region;
176 	size_t mba_size;
177 
178 	phys_addr_t mpss_phys;
179 	phys_addr_t mpss_reloc;
180 	void *mpss_region;
181 	size_t mpss_size;
182 
183 	struct qcom_rproc_glink glink_subdev;
184 	struct qcom_rproc_subdev smd_subdev;
185 	struct qcom_rproc_ssr ssr_subdev;
186 	struct qcom_sysmon *sysmon;
187 	bool need_mem_protection;
188 	bool has_alt_reset;
189 	int mpss_perm;
190 	int mba_perm;
191 	const char *hexagon_mdt_image;
192 	int version;
193 };
194 
195 enum {
196 	MSS_MSM8916,
197 	MSS_MSM8974,
198 	MSS_MSM8996,
199 	MSS_SDM845,
200 };
201 
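/*
 * Walk the NULL-terminated @reg_res table and acquire each named supply,
 * recording its voltage/load requirements in @regs. Returns the number of
 * regulators found, or a negative errno on failure.
 */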
202 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
203 			       const struct qcom_mss_reg_res *reg_res)
204 {
205 	int rc;
206 	int i;
207 
208 	if (!reg_res)
209 		return 0;
210 
211 	for (i = 0; reg_res[i].supply; i++) {
212 		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
213 		if (IS_ERR(regs[i].reg)) {
214 			rc = PTR_ERR(regs[i].reg);
215 			if (rc != -EPROBE_DEFER)
216 				dev_err(dev, "Failed to get %s regulator\n",
217 					reg_res[i].supply);
218 			return rc;
219 		}
220 
221 		regs[i].uV = reg_res[i].uV;
222 		regs[i].uA = reg_res[i].uA;
223 	}
224 
225 	return i;
226 }
227 
228 static int q6v5_regulator_enable(struct q6v5 *qproc,
229 				 struct reg_info *regs, int count)
230 {
231 	int ret;
232 	int i;
233 
234 	for (i = 0; i < count; i++) {
235 		if (regs[i].uV > 0) {
236 			ret = regulator_set_voltage(regs[i].reg,
237 					regs[i].uV, INT_MAX);
238 			if (ret) {
239 				dev_err(qproc->dev,
240 					"Failed to set voltage for regulator %d\n",
241 						i);
242 				goto err;
243 			}
244 		}
245 
246 		if (regs[i].uA > 0) {
247 			ret = regulator_set_load(regs[i].reg,
248 						 regs[i].uA);
249 			if (ret < 0) {
250 				dev_err(qproc->dev,
251 					"Failed to set regulator mode\n");
252 				goto err;
253 			}
254 		}
255 
256 		ret = regulator_enable(regs[i].reg);
257 		if (ret) {
258 			dev_err(qproc->dev, "Regulator enable failed\n");
259 			goto err;
260 		}
261 	}
262 
263 	return 0;
264 err:
265 	for (; i >= 0; i--) {
266 		if (regs[i].uV > 0)
267 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
268 
269 		if (regs[i].uA > 0)
270 			regulator_set_load(regs[i].reg, 0);
271 
272 		regulator_disable(regs[i].reg);
273 	}
274 
275 	return ret;
276 }
277 
278 static void q6v5_regulator_disable(struct q6v5 *qproc,
279 				   struct reg_info *regs, int count)
280 {
281 	int i;
282 
283 	for (i = 0; i < count; i++) {
284 		if (regs[i].uV > 0)
285 			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
286 
287 		if (regs[i].uA > 0)
288 			regulator_set_load(regs[i].reg, 0);
289 
290 		regulator_disable(regs[i].reg);
291 	}
292 }
293 
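/* Prepare and enable @count clocks, unwinding already-enabled ones on failure. */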
294 static int q6v5_clk_enable(struct device *dev,
295 			   struct clk **clks, int count)
296 {
297 	int rc;
298 	int i;
299 
300 	for (i = 0; i < count; i++) {
301 		rc = clk_prepare_enable(clks[i]);
302 		if (rc) {
303 			dev_err(dev, "Clock enable failed\n");
304 			goto err;
305 		}
306 	}
307 
308 	return 0;
309 err:
310 	for (i--; i >= 0; i--)
311 		clk_disable_unprepare(clks[i]);
312 
313 	return rc;
314 }
315 
316 static void q6v5_clk_disable(struct device *dev,
317 			     struct clk **clks, int count)
318 {
319 	int i;
320 
321 	for (i = 0; i < count; i++)
322 		clk_disable_unprepare(clks[i]);
323 }
324 
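/*
 * Vote for the maximum performance state on each attached power domain and
 * take a runtime PM reference so the domains stay powered on.
 */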
325 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
326 			   size_t pd_count)
327 {
328 	int ret;
329 	int i;
330 
331 	for (i = 0; i < pd_count; i++) {
332 		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
333 		ret = pm_runtime_get_sync(pds[i]);
334 		if (ret < 0)
335 			goto unroll_pd_votes;
336 	}
337 
338 	return 0;
339 
340 unroll_pd_votes:
341 	for (i--; i >= 0; i--) {
342 		dev_pm_genpd_set_performance_state(pds[i], 0);
343 		pm_runtime_put(pds[i]);
344 	}
345 
346 	return ret;
347 }
348 
349 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
350 			     size_t pd_count)
351 {
352 	int i;
353 
354 	for (i = 0; i < pd_count; i++) {
355 		dev_pm_genpd_set_performance_state(pds[i], 0);
356 		pm_runtime_put(pds[i]);
357 	}
358 }
359 
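/*
 * Reassign ownership of a physical memory range between Linux (HLOS) and the
 * modem (MSA VM) via a secure monitor call. This is a no-op on platforms
 * without memory protection, or when the range already has the requested
 * owner.
 */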
360 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
361 				   bool remote_owner, phys_addr_t addr,
362 				   size_t size)
363 {
364 	struct qcom_scm_vmperm next;
365 
366 	if (!qproc->need_mem_protection)
367 		return 0;
368 	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
369 		return 0;
370 	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
371 		return 0;
372 
373 	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
374 	next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
375 
376 	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
377 				   current_perm, &next, 1);
378 }
379 
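/* rproc .load handler: copy the MBA firmware image into the MBA region. */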
380 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
381 {
382 	struct q6v5 *qproc = rproc->priv;
383 
384 	memcpy(qproc->mba_region, fw->data, fw->size);
385 
386 	return 0;
387 }
388 
389 static int q6v5_reset_assert(struct q6v5 *qproc)
390 {
391 	int ret;
392 
393 	if (qproc->has_alt_reset) {
394 		reset_control_assert(qproc->pdc_reset);
395 		ret = reset_control_reset(qproc->mss_restart);
396 		reset_control_deassert(qproc->pdc_reset);
397 	} else {
398 		ret = reset_control_assert(qproc->mss_restart);
399 	}
400 
401 	return ret;
402 }
403 
404 static int q6v5_reset_deassert(struct q6v5 *qproc)
405 {
406 	int ret;
407 
408 	if (qproc->has_alt_reset) {
409 		reset_control_assert(qproc->pdc_reset);
410 		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
411 		ret = reset_control_reset(qproc->mss_restart);
412 		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
413 		reset_control_deassert(qproc->pdc_reset);
414 	} else {
415 		ret = reset_control_deassert(qproc->mss_restart);
416 	}
417 
418 	return ret;
419 }
420 
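/*
 * Poll the RMB PBL status register until the primary boot loader posts a
 * non-zero status or @ms milliseconds elapse. Returns the posted status, or
 * -ETIMEDOUT.
 */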
421 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
422 {
423 	unsigned long timeout;
424 	s32 val;
425 
426 	timeout = jiffies + msecs_to_jiffies(ms);
427 	for (;;) {
428 		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
429 		if (val)
430 			break;
431 
432 		if (time_after(jiffies, timeout))
433 			return -ETIMEDOUT;
434 
435 		msleep(1);
436 	}
437 
438 	return val;
439 }
440 
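/*
 * Poll the RMB MBA status register until it reports @status (or any non-zero
 * value when @status is 0), the MBA posts an error (negative value), or @ms
 * milliseconds elapse.
 */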
441 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
442 {
443 
444 	unsigned long timeout;
445 	s32 val;
446 
447 	timeout = jiffies + msecs_to_jiffies(ms);
448 	for (;;) {
449 		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
450 		if (val < 0)
451 			break;
452 
453 		if (!status && val)
454 			break;
455 		else if (status && val == status)
456 			break;
457 
458 		if (time_after(jiffies, timeout))
459 			return -ETIMEDOUT;
460 
461 		msleep(1);
462 	}
463 
464 	return val;
465 }
466 
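/*
 * Bring the Hexagon core out of reset. The power-up sequence differs per
 * QDSP6 version: SDM845 hands off to the boot FSM, MSM8996 powers up the
 * memories one bank at a time, and older parts use the legacy BHS sequence.
 * All paths finish by waiting for the PBL to report its boot status.
 */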
467 static int q6v5proc_reset(struct q6v5 *qproc)
468 {
469 	u32 val;
470 	int ret;
471 	int i;
472 
473 	if (qproc->version == MSS_SDM845) {
474 		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
475 		val |= 0x1;
476 		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
477 
478 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
479 					 val, !(val & BIT(31)), 1,
480 					 SLEEP_CHECK_MAX_LOOPS);
481 		if (ret) {
482 			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
483 			return -ETIMEDOUT;
484 		}
485 
486 		/* De-assert QDSP6 stop core */
487 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
488 		/* Trigger boot FSM */
489 		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
490 
491 		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
492 				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
493 		if (ret) {
494 			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
495 			/* Reset the modem so that boot FSM is in reset state */
496 			q6v5_reset_deassert(qproc);
497 			return ret;
498 		}
499 
500 		goto pbl_wait;
501 	} else if (qproc->version == MSS_MSM8996) {
502 		/* Override the ACC value if required */
503 		writel(QDSP6SS_ACC_OVERRIDE_VAL,
504 		       qproc->reg_base + QDSP6SS_STRAP_ACC);
505 
506 		/* Assert resets, stop core */
507 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
508 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
509 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
510 
511 		/* BHS requires the XO CBCR to be enabled */
512 		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
513 		val |= 0x1;
514 		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
515 
516 		/* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
517 		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
518 					 val, !(val & BIT(31)), 1,
519 					 HALT_CHECK_MAX_LOOPS);
520 		if (ret) {
521 			dev_err(qproc->dev,
522 				"xo cbcr enabling timed out (rc:%d)\n", ret);
523 			return ret;
524 		}
525 		/* Enable power block headswitch and wait for it to stabilize */
526 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
527 		val |= QDSP6v56_BHS_ON;
528 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
529 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
530 		udelay(1);
531 
532 		/* Put LDO in bypass mode */
533 		val |= QDSP6v56_LDO_BYP;
534 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
535 
536 		/* Deassert QDSP6 compiler memory clamp */
537 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
538 		val &= ~QDSP6v56_CLAMP_QMC_MEM;
539 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
540 
541 		/* Deassert memory peripheral sleep and L2 memory standby */
542 		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
543 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
544 
545 		/* Turn on L1, L2, ETB and JU memories one at a time */
546 		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
547 		for (i = 19; i >= 0; i--) {
548 			val |= BIT(i);
549 			writel(val, qproc->reg_base +
550 						QDSP6SS_MEM_PWR_CTL);
551 			/*
552 			 * Read back value to ensure the write is done then
553 			 * wait for 1us for both memory peripheral and data
554 			 * array to turn on.
555 			 */
556 			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
557 			udelay(1);
558 		}
559 		/* Remove word line clamp */
560 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
561 		val &= ~QDSP6v56_CLAMP_WL;
562 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
563 	} else {
564 		/* Assert resets, stop core */
565 		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
566 		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
567 		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
568 
569 		/* Enable power block headswitch and wait for it to stabilize */
570 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
571 		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
572 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
573 		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
574 		udelay(1);
575 		/*
576 		 * Turn on memories. L2 banks should be done individually
577 		 * to minimize inrush current.
578 		 */
579 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
580 		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
581 			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
582 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
583 		val |= Q6SS_L2DATA_SLP_NRET_N_2;
584 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
585 		val |= Q6SS_L2DATA_SLP_NRET_N_1;
586 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
587 		val |= Q6SS_L2DATA_SLP_NRET_N_0;
588 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
589 	}
590 	/* Remove IO clamp */
591 	val &= ~Q6SS_CLAMP_IO;
592 	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
593 
594 	/* Bring core out of reset */
595 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
596 	val &= ~Q6SS_CORE_ARES;
597 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
598 
599 	/* Turn on core clock */
600 	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
601 	val |= Q6SS_CLK_ENABLE;
602 	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
603 
604 	/* Start core execution */
605 	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
606 	val &= ~Q6SS_STOP_CORE;
607 	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
608 
609 pbl_wait:
610 	/* Wait for PBL status */
611 	ret = q6v5_rmb_pbl_wait(qproc, 1000);
612 	if (ret == -ETIMEDOUT) {
613 		dev_err(qproc->dev, "PBL boot timed out\n");
614 	} else if (ret != RMB_PBL_SUCCESS) {
615 		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
616 		ret = -EINVAL;
617 	} else {
618 		ret = 0;
619 	}
620 
621 	return ret;
622 }
623 
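/*
 * Force a bus port into the halted state: assert a halt request through the
 * syscon halt registers and busy-wait for the ack/idle indication. The port
 * remains halted until the next reset, even after the request is cleared.
 */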
624 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
625 				   struct regmap *halt_map,
626 				   u32 offset)
627 {
628 	unsigned long timeout;
629 	unsigned int val;
630 	int ret;
631 
632 	/* Check if we're already idle */
633 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
634 	if (!ret && val)
635 		return;
636 
637 	/* Assert halt request */
638 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
639 
640 	/* Wait for halt */
641 	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
642 	for (;;) {
643 		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
644 		if (ret || val || time_after(jiffies, timeout))
645 			break;
646 
647 		msleep(1);
648 	}
649 
650 	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
651 	if (ret || !val)
652 		dev_err(qproc->dev, "port failed halt\n");
653 
654 	/* Clear halt request (port will remain halted until reset) */
655 	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
656 }
657 
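/*
 * Feed the MPSS ELF metadata (header and hash segment) to the MBA for
 * authentication: copy it into a DMA buffer, assign the buffer to the modem,
 * signal the RMB and wait for the authentication result, then reclaim the
 * buffer.
 */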
658 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
659 {
660 	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
661 	dma_addr_t phys;
662 	void *metadata;
663 	int mdata_perm;
664 	int xferop_ret;
665 	size_t size;
666 	void *ptr;
667 	int ret;
668 
669 	metadata = qcom_mdt_read_metadata(fw, &size);
670 	if (IS_ERR(metadata))
671 		return PTR_ERR(metadata);
672 
673 	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
674 	if (!ptr) {
675 		kfree(metadata);
676 		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
677 		return -ENOMEM;
678 	}
679 
680 	memcpy(ptr, metadata, size);
681 
682 	/* Hypervisor mapping so that the modem can access the metadata */
683 	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
684 	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
685 	if (ret) {
686 		dev_err(qproc->dev,
687 			"assigning Q6 access to metadata failed: %d\n", ret);
688 		ret = -EAGAIN;
689 		goto free_dma_attrs;
690 	}
691 
692 	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
693 	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
694 
695 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
696 	if (ret == -ETIMEDOUT)
697 		dev_err(qproc->dev, "MPSS header authentication timed out\n");
698 	else if (ret < 0)
699 		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
700 
701 	/* Metadata authentication done, remove modem access */
702 	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
703 	if (xferop_ret)
704 		dev_warn(qproc->dev,
705 			 "mdt buffer not reclaimed, system may become unstable\n");
706 
707 free_dma_attrs:
708 	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
709 	kfree(metadata);
710 
711 	return ret < 0 ? ret : 0;
712 }
713 
714 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
715 {
716 	if (phdr->p_type != PT_LOAD)
717 		return false;
718 
719 	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
720 		return false;
721 
722 	if (!phdr->p_memsz)
723 		return false;
724 
725 	return true;
726 }
727 
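/*
 * Power up the subsystem (power domains, regulators, clocks, resets), hand
 * the MBA region over to the modem and boot the MBA, unwinding all acquired
 * resources on failure.
 */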
728 static int q6v5_mba_load(struct q6v5 *qproc)
729 {
730 	int ret;
731 	int xfermemop_ret;
732 
733 	qcom_q6v5_prepare(&qproc->q6v5);
734 
735 	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
736 	if (ret < 0) {
737 		dev_err(qproc->dev, "failed to enable active power domains\n");
738 		goto disable_irqs;
739 	}
740 
741 	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
742 	if (ret < 0) {
743 		dev_err(qproc->dev, "failed to enable proxy power domains\n");
744 		goto disable_active_pds;
745 	}
746 
747 	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
748 				    qproc->proxy_reg_count);
749 	if (ret) {
750 		dev_err(qproc->dev, "failed to enable proxy supplies\n");
751 		goto disable_proxy_pds;
752 	}
753 
754 	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
755 			      qproc->proxy_clk_count);
756 	if (ret) {
757 		dev_err(qproc->dev, "failed to enable proxy clocks\n");
758 		goto disable_proxy_reg;
759 	}
760 
761 	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
762 				    qproc->active_reg_count);
763 	if (ret) {
764 		dev_err(qproc->dev, "failed to enable supplies\n");
765 		goto disable_proxy_clk;
766 	}
767 
768 	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
769 			      qproc->reset_clk_count);
770 	if (ret) {
771 		dev_err(qproc->dev, "failed to enable reset clocks\n");
772 		goto disable_vdd;
773 	}
774 
775 	ret = q6v5_reset_deassert(qproc);
776 	if (ret) {
777 		dev_err(qproc->dev, "failed to deassert mss restart\n");
778 		goto disable_reset_clks;
779 	}
780 
781 	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
782 			      qproc->active_clk_count);
783 	if (ret) {
784 		dev_err(qproc->dev, "failed to enable clocks\n");
785 		goto assert_reset;
786 	}
787 
788 	/* Assign MBA image access in DDR to q6 */
789 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
790 				      qproc->mba_phys, qproc->mba_size);
791 	if (ret) {
792 		dev_err(qproc->dev,
793 			"assigning Q6 access to mba memory failed: %d\n", ret);
794 		goto disable_active_clks;
795 	}
796 
797 	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
798 
799 	ret = q6v5proc_reset(qproc);
800 	if (ret)
801 		goto reclaim_mba;
802 
803 	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
804 	if (ret == -ETIMEDOUT) {
805 		dev_err(qproc->dev, "MBA boot timed out\n");
806 		goto halt_axi_ports;
807 	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
808 		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
809 		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
810 		ret = -EINVAL;
811 		goto halt_axi_ports;
812 	}
813 
814 	qproc->dump_mba_loaded = true;
815 	return 0;
816 
817 halt_axi_ports:
818 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
819 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
820 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
821 
822 reclaim_mba:
823 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
824 						qproc->mba_phys,
825 						qproc->mba_size);
826 	if (xfermemop_ret) {
827 		dev_err(qproc->dev,
828 			"Failed to reclaim mba buffer, system may become unstable\n");
829 	}
830 
831 disable_active_clks:
832 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
833 			 qproc->active_clk_count);
834 assert_reset:
835 	q6v5_reset_assert(qproc);
836 disable_reset_clks:
837 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
838 			 qproc->reset_clk_count);
839 disable_vdd:
840 	q6v5_regulator_disable(qproc, qproc->active_regs,
841 			       qproc->active_reg_count);
842 disable_proxy_clk:
843 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
844 			 qproc->proxy_clk_count);
845 disable_proxy_reg:
846 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
847 			       qproc->proxy_reg_count);
848 disable_proxy_pds:
849 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
850 disable_active_pds:
851 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
852 disable_irqs:
853 	qcom_q6v5_unprepare(&qproc->q6v5);
854 
855 	return ret;
856 }
857 
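/*
 * Undo q6v5_mba_load(): halt the bus ports, reclaim the MPSS and MBA regions,
 * assert the resets and drop the active resource votes. The proxy votes are
 * dropped here only if qcom_q6v5_unprepare() indicates the MSA handover never
 * fired.
 */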
858 static void q6v5_mba_reclaim(struct q6v5 *qproc)
859 {
860 	int ret;
861 	u32 val;
862 
863 	qproc->dump_mba_loaded = false;
864 
865 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
866 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
867 	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
868 	if (qproc->version == MSS_MSM8996) {
869 		/*
870 		 * Clamp QDSP6 I/O and memories to avoid high MX current during LPASS/MSS restart.
871 		 */
872 		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
873 		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
874 			QDSP6v56_CLAMP_QMC_MEM;
875 		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
876 	}
877 
878 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
879 				      false, qproc->mpss_phys,
880 				      qproc->mpss_size);
881 	WARN_ON(ret);
882 
883 	q6v5_reset_assert(qproc);
884 
885 	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
886 			 qproc->reset_clk_count);
887 	q6v5_clk_disable(qproc->dev, qproc->active_clks,
888 			 qproc->active_clk_count);
889 	q6v5_regulator_disable(qproc, qproc->active_regs,
890 			       qproc->active_reg_count);
891 	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
892 
893 	/* In case of failure or coredump scenario where reclaiming MBA memory
894 	 * could not happen, reclaim it here.
895 	 */
896 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
897 				      qproc->mba_phys,
898 				      qproc->mba_size);
899 	WARN_ON(ret);
900 
901 	ret = qcom_q6v5_unprepare(&qproc->q6v5);
902 	if (ret) {
903 		q6v5_pds_disable(qproc, qproc->proxy_pds,
904 				 qproc->proxy_pd_count);
905 		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
906 				 qproc->proxy_clk_count);
907 		q6v5_regulator_disable(qproc, qproc->proxy_regs,
908 				       qproc->proxy_reg_count);
909 	}
910 }
911 
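/*
 * Load the MPSS firmware: authenticate the image header via the MBA, copy
 * each valid ELF segment (from the main or split image files) into the MPSS
 * region, then hand the region to the modem and wait for authentication.
 */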
912 static int q6v5_mpss_load(struct q6v5 *qproc)
913 {
914 	const struct elf32_phdr *phdrs;
915 	const struct elf32_phdr *phdr;
916 	const struct firmware *seg_fw;
917 	const struct firmware *fw;
918 	struct elf32_hdr *ehdr;
919 	phys_addr_t mpss_reloc;
920 	phys_addr_t boot_addr;
921 	phys_addr_t min_addr = PHYS_ADDR_MAX;
922 	phys_addr_t max_addr = 0;
923 	bool relocate = false;
924 	char *fw_name;
925 	size_t fw_name_len;
926 	ssize_t offset;
927 	size_t size = 0;
928 	void *ptr;
929 	int ret;
930 	int i;
931 
932 	fw_name_len = strlen(qproc->hexagon_mdt_image);
933 	if (fw_name_len <= 4)
934 		return -EINVAL;
935 
936 	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
937 	if (!fw_name)
938 		return -ENOMEM;
939 
940 	ret = request_firmware(&fw, fw_name, qproc->dev);
941 	if (ret < 0) {
942 		dev_err(qproc->dev, "unable to load %s\n", fw_name);
943 		goto out;
944 	}
945 
946 	/* Initialize the RMB validator */
947 	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
948 
949 	ret = q6v5_mpss_init_image(qproc, fw);
950 	if (ret)
951 		goto release_firmware;
952 
953 	ehdr = (struct elf32_hdr *)fw->data;
954 	phdrs = (struct elf32_phdr *)(ehdr + 1);
955 
956 	for (i = 0; i < ehdr->e_phnum; i++) {
957 		phdr = &phdrs[i];
958 
959 		if (!q6v5_phdr_valid(phdr))
960 			continue;
961 
962 		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
963 			relocate = true;
964 
965 		if (phdr->p_paddr < min_addr)
966 			min_addr = phdr->p_paddr;
967 
968 		if (phdr->p_paddr + phdr->p_memsz > max_addr)
969 			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
970 	}
971 
972 	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
973 	qproc->mpss_reloc = mpss_reloc;
974 	/* Load firmware segments */
975 	for (i = 0; i < ehdr->e_phnum; i++) {
976 		phdr = &phdrs[i];
977 
978 		if (!q6v5_phdr_valid(phdr))
979 			continue;
980 
981 		offset = phdr->p_paddr - mpss_reloc;
982 		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
983 			dev_err(qproc->dev, "segment outside memory range\n");
984 			ret = -EINVAL;
985 			goto release_firmware;
986 		}
987 
988 		ptr = qproc->mpss_region + offset;
989 
990 		if (phdr->p_filesz && phdr->p_offset < fw->size) {
991 			/* Segment is contained in the main (non-split) firmware file */
992 			if (phdr->p_offset + phdr->p_filesz > fw->size) {
993 				dev_err(qproc->dev,
994 					"failed to load segment %d from truncated file %s\n",
995 					i, fw_name);
996 				ret = -EINVAL;
997 				goto release_firmware;
998 			}
999 
1000 			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1001 		} else if (phdr->p_filesz) {
1002 			/* Replace "xxx.xxx" with "xxx.bxx" */
1003 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1004 			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
1005 			if (ret) {
1006 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
1007 				goto release_firmware;
1008 			}
1009 
1010 			memcpy(ptr, seg_fw->data, seg_fw->size);
1011 
1012 			release_firmware(seg_fw);
1013 		}
1014 
1015 		if (phdr->p_memsz > phdr->p_filesz) {
1016 			memset(ptr + phdr->p_filesz, 0,
1017 			       phdr->p_memsz - phdr->p_filesz);
1018 		}
1019 		size += phdr->p_memsz;
1020 	}
1021 
1022 	/* Transfer ownership of modem ddr region to q6 */
1023 	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
1024 				      qproc->mpss_phys, qproc->mpss_size);
1025 	if (ret) {
1026 		dev_err(qproc->dev,
1027 			"assigning Q6 access to mpss memory failed: %d\n", ret);
1028 		ret = -EAGAIN;
1029 		goto release_firmware;
1030 	}
1031 
1032 	boot_addr = relocate ? qproc->mpss_phys : min_addr;
1033 	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1034 	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1035 	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1036 
1037 	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1038 	if (ret == -ETIMEDOUT)
1039 		dev_err(qproc->dev, "MPSS authentication timed out\n");
1040 	else if (ret < 0)
1041 		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1042 
1043 release_firmware:
1044 	release_firmware(fw);
1045 out:
1046 	kfree(fw_name);
1047 
1048 	return ret < 0 ? ret : 0;
1049 }
1050 
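/*
 * Coredump segment callback. The MBA must be running for the modem memory to
 * be accessible, so it is booted before the first segment is copied and
 * reclaimed once the last registered segment has been dumped.
 */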
1051 static void qcom_q6v5_dump_segment(struct rproc *rproc,
1052 				   struct rproc_dump_segment *segment,
1053 				   void *dest)
1054 {
1055 	int ret = 0;
1056 	struct q6v5 *qproc = rproc->priv;
1057 	unsigned long mask = BIT((unsigned long)segment->priv);
1058 	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
1059 
1060 	/* Unlock mba before copying segments */
1061 	if (!qproc->dump_mba_loaded)
1062 		ret = q6v5_mba_load(qproc);
1063 
1064 	if (!ptr || ret)
1065 		memset(dest, 0xff, segment->size);
1066 	else
1067 		memcpy(dest, ptr, segment->size);
1068 
1069 	qproc->dump_segment_mask |= mask;
1070 
1071 	/* Reclaim mba after copying segments */
1072 	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
1073 		if (qproc->dump_mba_loaded)
1074 			q6v5_mba_reclaim(qproc);
1075 	}
1076 }
1077 
1078 static int q6v5_start(struct rproc *rproc)
1079 {
1080 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1081 	int xfermemop_ret;
1082 	int ret;
1083 
1084 	ret = q6v5_mba_load(qproc);
1085 	if (ret)
1086 		return ret;
1087 
1088 	dev_info(qproc->dev, "MBA booted, loading mpss\n");
1089 
1090 	ret = q6v5_mpss_load(qproc);
1091 	if (ret)
1092 		goto reclaim_mpss;
1093 
1094 	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1095 	if (ret == -ETIMEDOUT) {
1096 		dev_err(qproc->dev, "start timed out\n");
1097 		goto reclaim_mpss;
1098 	}
1099 
1100 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
1101 						qproc->mba_phys,
1102 						qproc->mba_size);
1103 	if (xfermemop_ret)
1104 		dev_err(qproc->dev,
1105 			"Failed to reclaim mba buffer, system may become unstable\n");
1106 
1107 	/* Reset Dump Segment Mask */
1108 	qproc->dump_segment_mask = 0;
1109 	qproc->running = true;
1110 
1111 	return 0;
1112 
1113 reclaim_mpss:
1114 	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1115 						false, qproc->mpss_phys,
1116 						qproc->mpss_size);
1117 	WARN_ON(xfermemop_ret);
1118 	q6v5_mba_reclaim(qproc);
1119 
1120 	return ret;
1121 }
1122 
1123 static int q6v5_stop(struct rproc *rproc)
1124 {
1125 	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1126 	int ret;
1127 
1128 	qproc->running = false;
1129 
1130 	ret = qcom_q6v5_request_stop(&qproc->q6v5);
1131 	if (ret == -ETIMEDOUT)
1132 		dev_err(qproc->dev, "timed out waiting for stop acknowledgment\n");
1133 
1134 	q6v5_mba_reclaim(qproc);
1135 
1136 	return 0;
1137 }
1138 
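/* Translate a modem device address within the MPSS region to a kernel VA. */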
1139 static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
1140 {
1141 	struct q6v5 *qproc = rproc->priv;
1142 	int offset;
1143 
1144 	offset = da - qproc->mpss_reloc;
1145 	if (offset < 0 || offset + len > qproc->mpss_size)
1146 		return NULL;
1147 
1148 	return qproc->mpss_region + offset;
1149 }
1150 
1151 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1152 					    const struct firmware *mba_fw)
1153 {
1154 	const struct firmware *fw;
1155 	const struct elf32_phdr *phdrs;
1156 	const struct elf32_phdr *phdr;
1157 	const struct elf32_hdr *ehdr;
1158 	struct q6v5 *qproc = rproc->priv;
1159 	unsigned long i;
1160 	int ret;
1161 
1162 	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1163 	if (ret < 0) {
1164 		dev_err(qproc->dev, "unable to load %s\n",
1165 			qproc->hexagon_mdt_image);
1166 		return ret;
1167 	}
1168 
1169 	ehdr = (struct elf32_hdr *)fw->data;
1170 	phdrs = (struct elf32_phdr *)(ehdr + 1);
1171 	qproc->dump_complete_mask = 0;
1172 
1173 	for (i = 0; i < ehdr->e_phnum; i++) {
1174 		phdr = &phdrs[i];
1175 
1176 		if (!q6v5_phdr_valid(phdr))
1177 			continue;
1178 
1179 		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1180 							phdr->p_memsz,
1181 							qcom_q6v5_dump_segment,
1182 							(void *)i);
1183 		if (ret)
1184 			break;
1185 
1186 		qproc->dump_complete_mask |= BIT(i);
1187 	}
1188 
1189 	release_firmware(fw);
1190 	return ret;
1191 }
1192 
1193 static const struct rproc_ops q6v5_ops = {
1194 	.start = q6v5_start,
1195 	.stop = q6v5_stop,
1196 	.da_to_va = q6v5_da_to_va,
1197 	.parse_fw = qcom_q6v5_register_dump_segments,
1198 	.load = q6v5_load,
1199 };
1200 
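/*
 * Called once the modem signals the MSA ownership handover; the proxy clock,
 * regulator and power-domain votes are no longer needed and are dropped.
 */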
1201 static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
1202 {
1203 	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
1204 
1205 	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1206 			 qproc->proxy_clk_count);
1207 	q6v5_regulator_disable(qproc, qproc->proxy_regs,
1208 			       qproc->proxy_reg_count);
1209 	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1210 }
1211 
1212 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1213 {
1214 	struct of_phandle_args args;
1215 	struct resource *res;
1216 	int ret;
1217 
1218 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1219 	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
1220 	if (IS_ERR(qproc->reg_base))
1221 		return PTR_ERR(qproc->reg_base);
1222 
1223 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1224 	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
1225 	if (IS_ERR(qproc->rmb_base))
1226 		return PTR_ERR(qproc->rmb_base);
1227 
1228 	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1229 					       "qcom,halt-regs", 3, 0, &args);
1230 	if (ret < 0) {
1231 		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1232 		return -EINVAL;
1233 	}
1234 
1235 	qproc->halt_map = syscon_node_to_regmap(args.np);
1236 	of_node_put(args.np);
1237 	if (IS_ERR(qproc->halt_map))
1238 		return PTR_ERR(qproc->halt_map);
1239 
1240 	qproc->halt_q6 = args.args[0];
1241 	qproc->halt_modem = args.args[1];
1242 	qproc->halt_nc = args.args[2];
1243 
1244 	return 0;
1245 }
1246 
1247 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1248 		char **clk_names)
1249 {
1250 	int i;
1251 
1252 	if (!clk_names)
1253 		return 0;
1254 
1255 	for (i = 0; clk_names[i]; i++) {
1256 		clks[i] = devm_clk_get(dev, clk_names[i]);
1257 		if (IS_ERR(clks[i])) {
1258 			int rc = PTR_ERR(clks[i]);
1259 
1260 			if (rc != -EPROBE_DEFER)
1261 				dev_err(dev, "Failed to get %s clock\n",
1262 					clk_names[i]);
1263 			return rc;
1264 		}
1265 	}
1266 
1267 	return i;
1268 }
1269 
1270 static int q6v5_pds_attach(struct device *dev, struct device **devs,
1271 			   char **pd_names)
1272 {
1273 	size_t num_pds = 0;
1274 	int ret;
1275 	int i;
1276 
1277 	if (!pd_names)
1278 		return 0;
1279 
1280 	while (pd_names[num_pds])
1281 		num_pds++;
1282 
1283 	for (i = 0; i < num_pds; i++) {
1284 		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1285 		if (IS_ERR_OR_NULL(devs[i])) {
1286 			ret = PTR_ERR(devs[i]) ? : -ENODATA;
1287 			goto unroll_attach;
1288 		}
1289 	}
1290 
1291 	return num_pds;
1292 
1293 unroll_attach:
1294 	for (i--; i >= 0; i--)
1295 		dev_pm_domain_detach(devs[i], false);
1296 
1297 	return ret;
1298 }
1299 
1300 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1301 			    size_t pd_count)
1302 {
1303 	int i;
1304 
1305 	for (i = 0; i < pd_count; i++)
1306 		dev_pm_domain_detach(pds[i], false);
1307 }
1308 
1309 static int q6v5_init_reset(struct q6v5 *qproc)
1310 {
1311 	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1312 							      "mss_restart");
1313 	if (IS_ERR(qproc->mss_restart)) {
1314 		dev_err(qproc->dev, "failed to acquire mss restart\n");
1315 		return PTR_ERR(qproc->mss_restart);
1316 	}
1317 
1318 	if (qproc->has_alt_reset) {
1319 		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1320 								    "pdc_reset");
1321 		if (IS_ERR(qproc->pdc_reset)) {
1322 			dev_err(qproc->dev, "failed to acquire pdc reset\n");
1323 			return PTR_ERR(qproc->pdc_reset);
1324 		}
1325 	}
1326 
1327 	return 0;
1328 }
1329 
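/*
 * Resolve the "mba" and "mpss" reserved-memory regions from the device tree
 * and map them write-combined for firmware loading.
 */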
1330 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1331 {
1332 	struct device_node *child;
1333 	struct device_node *node;
1334 	struct resource r;
1335 	int ret;
1336 
1337 	child = of_get_child_by_name(qproc->dev->of_node, "mba");
1338 	node = of_parse_phandle(child, "memory-region", 0);
1339 	ret = of_address_to_resource(node, 0, &r);
1340 	if (ret) {
1341 		dev_err(qproc->dev, "unable to resolve mba region\n");
1342 		return ret;
1343 	}
1344 	of_node_put(node);
1345 
1346 	qproc->mba_phys = r.start;
1347 	qproc->mba_size = resource_size(&r);
1348 	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1349 	if (!qproc->mba_region) {
1350 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1351 			&r.start, qproc->mba_size);
1352 		return -EBUSY;
1353 	}
1354 
1355 	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1356 	node = of_parse_phandle(child, "memory-region", 0);
1357 	ret = of_address_to_resource(node, 0, &r);
1358 	if (ret) {
1359 		dev_err(qproc->dev, "unable to resolve mpss region\n");
1360 		return ret;
1361 	}
1362 	of_node_put(node);
1363 
1364 	qproc->mpss_phys = qproc->mpss_reloc = r.start;
1365 	qproc->mpss_size = resource_size(&r);
1366 	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1367 	if (!qproc->mpss_region) {
1368 		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1369 			&r.start, qproc->mpss_size);
1370 		return -EBUSY;
1371 	}
1372 
1373 	return 0;
1374 }
1375 
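/*
 * Probe: look up the per-SoC match data, map the register and carveout
 * regions, acquire clocks, regulators, power domains and resets, then
 * register the remoteproc and its subdevices.
 */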
1376 static int q6v5_probe(struct platform_device *pdev)
1377 {
1378 	const struct rproc_hexagon_res *desc;
1379 	struct q6v5 *qproc;
1380 	struct rproc *rproc;
1381 	const char *mba_image;
1382 	int ret;
1383 
1384 	desc = of_device_get_match_data(&pdev->dev);
1385 	if (!desc)
1386 		return -EINVAL;
1387 
1388 	if (desc->need_mem_protection && !qcom_scm_is_available())
1389 		return -EPROBE_DEFER;
1390 
1391 	mba_image = desc->hexagon_mba_image;
1392 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1393 					    0, &mba_image);
1394 	if (ret < 0 && ret != -EINVAL)
1395 		return ret;
1396 
1397 	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1398 			    mba_image, sizeof(*qproc));
1399 	if (!rproc) {
1400 		dev_err(&pdev->dev, "failed to allocate rproc\n");
1401 		return -ENOMEM;
1402 	}
1403 
1404 	rproc->auto_boot = false;
1405 
1406 	qproc = (struct q6v5 *)rproc->priv;
1407 	qproc->dev = &pdev->dev;
1408 	qproc->rproc = rproc;
1409 	qproc->hexagon_mdt_image = "modem.mdt";
1410 	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1411 					    1, &qproc->hexagon_mdt_image);
1412 	if (ret < 0 && ret != -EINVAL)
1413 		goto free_rproc;
1414 
1415 	platform_set_drvdata(pdev, qproc);
1416 
1417 	ret = q6v5_init_mem(qproc, pdev);
1418 	if (ret)
1419 		goto free_rproc;
1420 
1421 	ret = q6v5_alloc_memory_region(qproc);
1422 	if (ret)
1423 		goto free_rproc;
1424 
1425 	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1426 			       desc->proxy_clk_names);
1427 	if (ret < 0) {
1428 		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1429 		goto free_rproc;
1430 	}
1431 	qproc->proxy_clk_count = ret;
1432 
1433 	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1434 			       desc->reset_clk_names);
1435 	if (ret < 0) {
1436 		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1437 		goto free_rproc;
1438 	}
1439 	qproc->reset_clk_count = ret;
1440 
1441 	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1442 			       desc->active_clk_names);
1443 	if (ret < 0) {
1444 		dev_err(&pdev->dev, "Failed to get active clocks.\n");
1445 		goto free_rproc;
1446 	}
1447 	qproc->active_clk_count = ret;
1448 
1449 	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1450 				  desc->proxy_supply);
1451 	if (ret < 0) {
1452 		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1453 		goto free_rproc;
1454 	}
1455 	qproc->proxy_reg_count = ret;
1456 
1457 	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
1458 				  desc->active_supply);
1459 	if (ret < 0) {
1460 		dev_err(&pdev->dev, "Failed to get active regulators.\n");
1461 		goto free_rproc;
1462 	}
1463 	qproc->active_reg_count = ret;
1464 
1465 	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1466 			      desc->active_pd_names);
1467 	if (ret < 0) {
1468 		dev_err(&pdev->dev, "Failed to attach active power domains\n");
1469 		goto free_rproc;
1470 	}
1471 	qproc->active_pd_count = ret;
1472 
1473 	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1474 			      desc->proxy_pd_names);
1475 	if (ret < 0) {
1476 		dev_err(&pdev->dev, "Failed to init power domains\n");
1477 		goto detach_active_pds;
1478 	}
1479 	qproc->proxy_pd_count = ret;
1480 
1481 	qproc->has_alt_reset = desc->has_alt_reset;
1482 	ret = q6v5_init_reset(qproc);
1483 	if (ret)
1484 		goto detach_proxy_pds;
1485 
1486 	qproc->version = desc->version;
1487 	qproc->need_mem_protection = desc->need_mem_protection;
1488 
1489 	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1490 			     qcom_msa_handover);
1491 	if (ret)
1492 		goto detach_proxy_pds;
1493 
1494 	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1495 	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
1496 	qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
1497 	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
1498 	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
1499 	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
1500 	if (IS_ERR(qproc->sysmon)) {
1501 		ret = PTR_ERR(qproc->sysmon);
1502 		goto detach_proxy_pds;
1503 	}
1504 
1505 	ret = rproc_add(rproc);
1506 	if (ret)
1507 		goto detach_proxy_pds;
1508 
1509 	return 0;
1510 
1511 detach_proxy_pds:
1512 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1513 detach_active_pds:
1514 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1515 free_rproc:
1516 	rproc_free(rproc);
1517 
1518 	return ret;
1519 }
1520 
1521 static int q6v5_remove(struct platform_device *pdev)
1522 {
1523 	struct q6v5 *qproc = platform_get_drvdata(pdev);
1524 
1525 	rproc_del(qproc->rproc);
1526 
1527 	qcom_remove_sysmon_subdev(qproc->sysmon);
1528 	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
1529 	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
1530 	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
1531 
1532 	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1533 	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1534 
1535 	rproc_free(qproc->rproc);
1536 
1537 	return 0;
1538 }
1539 
1540 static const struct rproc_hexagon_res sdm845_mss = {
1541 	.hexagon_mba_image = "mba.mbn",
1542 	.proxy_clk_names = (char*[]){
1543 			"xo",
1544 			"prng",
1545 			NULL
1546 	},
1547 	.reset_clk_names = (char*[]){
1548 			"iface",
1549 			"snoc_axi",
1550 			NULL
1551 	},
1552 	.active_clk_names = (char*[]){
1553 			"bus",
1554 			"mem",
1555 			"gpll0_mss",
1556 			"mnoc_axi",
1557 			NULL
1558 	},
1559 	.active_pd_names = (char*[]){
1560 			"load_state",
1561 			NULL
1562 	},
1563 	.proxy_pd_names = (char*[]){
1564 			"cx",
1565 			"mx",
1566 			"mss",
1567 			NULL
1568 	},
1569 	.need_mem_protection = true,
1570 	.has_alt_reset = true,
1571 	.version = MSS_SDM845,
1572 };
1573 
1574 static const struct rproc_hexagon_res msm8996_mss = {
1575 	.hexagon_mba_image = "mba.mbn",
1576 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1577 		{
1578 			.supply = "pll",
1579 			.uA = 100000,
1580 		},
1581 		{}
1582 	},
1583 	.proxy_clk_names = (char*[]){
1584 			"xo",
1585 			"pnoc",
1586 			"qdss",
1587 			NULL
1588 	},
1589 	.active_clk_names = (char*[]){
1590 			"iface",
1591 			"bus",
1592 			"mem",
1593 			"gpll0_mss",
1594 			"snoc_axi",
1595 			"mnoc_axi",
1596 			NULL
1597 	},
1598 	.need_mem_protection = true,
1599 	.has_alt_reset = false,
1600 	.version = MSS_MSM8996,
1601 };
1602 
1603 static const struct rproc_hexagon_res msm8916_mss = {
1604 	.hexagon_mba_image = "mba.mbn",
1605 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1606 		{
1607 			.supply = "mx",
1608 			.uV = 1050000,
1609 		},
1610 		{
1611 			.supply = "cx",
1612 			.uA = 100000,
1613 		},
1614 		{
1615 			.supply = "pll",
1616 			.uA = 100000,
1617 		},
1618 		{}
1619 	},
1620 	.proxy_clk_names = (char*[]){
1621 		"xo",
1622 		NULL
1623 	},
1624 	.active_clk_names = (char*[]){
1625 		"iface",
1626 		"bus",
1627 		"mem",
1628 		NULL
1629 	},
1630 	.need_mem_protection = false,
1631 	.has_alt_reset = false,
1632 	.version = MSS_MSM8916,
1633 };
1634 
1635 static const struct rproc_hexagon_res msm8974_mss = {
1636 	.hexagon_mba_image = "mba.b00",
1637 	.proxy_supply = (struct qcom_mss_reg_res[]) {
1638 		{
1639 			.supply = "mx",
1640 			.uV = 1050000,
1641 		},
1642 		{
1643 			.supply = "cx",
1644 			.uA = 100000,
1645 		},
1646 		{
1647 			.supply = "pll",
1648 			.uA = 100000,
1649 		},
1650 		{}
1651 	},
1652 	.active_supply = (struct qcom_mss_reg_res[]) {
1653 		{
1654 			.supply = "mss",
1655 			.uV = 1050000,
1656 			.uA = 100000,
1657 		},
1658 		{}
1659 	},
1660 	.proxy_clk_names = (char*[]){
1661 		"xo",
1662 		NULL
1663 	},
1664 	.active_clk_names = (char*[]){
1665 		"iface",
1666 		"bus",
1667 		"mem",
1668 		NULL
1669 	},
1670 	.need_mem_protection = false,
1671 	.has_alt_reset = false,
1672 	.version = MSS_MSM8974,
1673 };
1674 
1675 static const struct of_device_id q6v5_of_match[] = {
1676 	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1677 	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1678 	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
1679 	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
1680 	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
1681 	{ },
1682 };
1683 MODULE_DEVICE_TABLE(of, q6v5_of_match);
1684 
1685 static struct platform_driver q6v5_driver = {
1686 	.probe = q6v5_probe,
1687 	.remove = q6v5_remove,
1688 	.driver = {
1689 		.name = "qcom-q6v5-mss",
1690 		.of_match_table = q6v5_of_match,
1691 	},
1692 };
1693 module_platform_driver(q6v5_driver);
1694 
1695 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
1696 MODULE_LICENSE("GPL v2");
1697