1 /*
2 * Qualcomm Peripheral Image Loader
3 *
4 * Copyright (C) 2016 Linaro Ltd.
5 * Copyright (C) 2014 Sony Mobile Communications AB
6 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18 #include <linux/clk.h>
19 #include <linux/delay.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/kernel.h>
23 #include <linux/mfd/syscon.h>
24 #include <linux/module.h>
25 #include <linux/of_address.h>
26 #include <linux/of_device.h>
27 #include <linux/platform_device.h>
28 #include <linux/regmap.h>
29 #include <linux/regulator/consumer.h>
30 #include <linux/remoteproc.h>
31 #include <linux/reset.h>
32 #include <linux/soc/qcom/mdt_loader.h>
33 #include <linux/soc/qcom/smem.h>
34 #include <linux/soc/qcom/smem_state.h>
35
36 #include "remoteproc_internal.h"
37 #include "qcom_common.h"
38
39 #include <linux/qcom_scm.h>
40
41 #define MPSS_CRASH_REASON_SMEM 421
42
43 /* RMB Status Register Values */
44 #define RMB_PBL_SUCCESS 0x1
45
46 #define RMB_MBA_XPU_UNLOCKED 0x1
47 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
48 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
49 #define RMB_MBA_AUTH_COMPLETE 0x4
50
51 /* PBL/MBA interface registers */
52 #define RMB_MBA_IMAGE_REG 0x00
53 #define RMB_PBL_STATUS_REG 0x04
54 #define RMB_MBA_COMMAND_REG 0x08
55 #define RMB_MBA_STATUS_REG 0x0C
56 #define RMB_PMI_META_DATA_REG 0x10
57 #define RMB_PMI_CODE_START_REG 0x14
58 #define RMB_PMI_CODE_LENGTH_REG 0x18
59
60 #define RMB_CMD_META_DATA_READY 0x1
61 #define RMB_CMD_LOAD_READY 0x2
62
63 /* QDSP6SS Register Offsets */
64 #define QDSP6SS_RESET_REG 0x014
65 #define QDSP6SS_GFMUX_CTL_REG 0x020
66 #define QDSP6SS_PWR_CTL_REG 0x030
67
68 /* AXI Halt Register Offsets */
69 #define AXI_HALTREQ_REG 0x0
70 #define AXI_HALTACK_REG 0x4
71 #define AXI_IDLE_REG 0x8
72
73 #define HALT_ACK_TIMEOUT_MS 100
74
75 /* QDSP6SS_RESET */
76 #define Q6SS_STOP_CORE BIT(0)
77 #define Q6SS_CORE_ARES BIT(1)
78 #define Q6SS_BUS_ARES_ENABLE BIT(2)
79
80 /* QDSP6SS_GFMUX_CTL */
81 #define Q6SS_CLK_ENABLE BIT(1)
82
83 /* QDSP6SS_PWR_CTL */
84 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
85 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
86 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
87 #define Q6SS_L2TAG_SLP_NRET_N BIT(16)
88 #define Q6SS_ETB_SLP_NRET_N BIT(17)
89 #define Q6SS_L2DATA_STBY_N BIT(18)
90 #define Q6SS_SLP_RET_N BIT(19)
91 #define Q6SS_CLAMP_IO BIT(20)
92 #define QDSS_BHS_ON BIT(21)
93 #define QDSS_LDO_BYP BIT(22)
94
/* Runtime state for one regulator supply: handle plus the levels to apply. */
struct reg_info {
	struct regulator *reg;
	int uV;		/* voltage to request at enable time; 0 = don't set */
	int uA;		/* load to request at enable time; 0 = don't set */
};
100
/* Static description of one regulator supply, taken from SoC match data. */
struct qcom_mss_reg_res {
	const char *supply;	/* consumer supply name; NULL terminates arrays */
	int uV;			/* voltage to request; 0 = skip */
	int uA;			/* load to request; 0 = skip */
};
106
/* Per-SoC resources: MBA firmware name plus supply/clock requirements. */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;		/* MBA boot firmware filename */
	struct qcom_mss_reg_res *proxy_supply;	/* held only until handover */
	struct qcom_mss_reg_res *active_supply;	/* held while Hexagon runs */
	char **proxy_clk_names;			/* NULL-terminated list */
	char **active_clk_names;		/* NULL-terminated list */
};
114
/* Driver instance state for one Hexagon (q6v5) remote processor. */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register block */
	void __iomem *rmb_base;		/* PBL/MBA relay message buffer registers */

	struct regmap *halt_map;	/* syscon holding the AXI halt registers */
	u32 halt_q6;			/* per-port offsets within halt_map */
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;	/* outbound smem state for "stop" */
	unsigned stop_bit;

	struct clk *active_clks[8];
	struct clk *proxy_clks[4];
	int active_clk_count;
	int proxy_clk_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	struct completion start_done;	/* completed by the handover IRQ */
	struct completion stop_done;	/* completed by stop-ack (or wdog) IRQ */
	bool running;			/* modem believed to be up */

	phys_addr_t mba_phys;		/* MBA carveout */
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;		/* modem firmware carveout */
	phys_addr_t mpss_reloc;		/* base the loaded image is relative to */
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
};
158
q6v5_regulator_init(struct device * dev,struct reg_info * regs,const struct qcom_mss_reg_res * reg_res)159 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
160 const struct qcom_mss_reg_res *reg_res)
161 {
162 int rc;
163 int i;
164
165 if (!reg_res)
166 return 0;
167
168 for (i = 0; reg_res[i].supply; i++) {
169 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
170 if (IS_ERR(regs[i].reg)) {
171 rc = PTR_ERR(regs[i].reg);
172 if (rc != -EPROBE_DEFER)
173 dev_err(dev, "Failed to get %s\n regulator",
174 reg_res[i].supply);
175 return rc;
176 }
177
178 regs[i].uV = reg_res[i].uV;
179 regs[i].uA = reg_res[i].uA;
180 }
181
182 return i;
183 }
184
q6v5_regulator_enable(struct q6v5 * qproc,struct reg_info * regs,int count)185 static int q6v5_regulator_enable(struct q6v5 *qproc,
186 struct reg_info *regs, int count)
187 {
188 int ret;
189 int i;
190
191 for (i = 0; i < count; i++) {
192 if (regs[i].uV > 0) {
193 ret = regulator_set_voltage(regs[i].reg,
194 regs[i].uV, INT_MAX);
195 if (ret) {
196 dev_err(qproc->dev,
197 "Failed to request voltage for %d.\n",
198 i);
199 goto err;
200 }
201 }
202
203 if (regs[i].uA > 0) {
204 ret = regulator_set_load(regs[i].reg,
205 regs[i].uA);
206 if (ret < 0) {
207 dev_err(qproc->dev,
208 "Failed to set regulator mode\n");
209 goto err;
210 }
211 }
212
213 ret = regulator_enable(regs[i].reg);
214 if (ret) {
215 dev_err(qproc->dev, "Regulator enable failed\n");
216 goto err;
217 }
218 }
219
220 return 0;
221 err:
222 for (; i >= 0; i--) {
223 if (regs[i].uV > 0)
224 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
225
226 if (regs[i].uA > 0)
227 regulator_set_load(regs[i].reg, 0);
228
229 regulator_disable(regs[i].reg);
230 }
231
232 return ret;
233 }
234
q6v5_regulator_disable(struct q6v5 * qproc,struct reg_info * regs,int count)235 static void q6v5_regulator_disable(struct q6v5 *qproc,
236 struct reg_info *regs, int count)
237 {
238 int i;
239
240 for (i = 0; i < count; i++) {
241 if (regs[i].uV > 0)
242 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
243
244 if (regs[i].uA > 0)
245 regulator_set_load(regs[i].reg, 0);
246
247 regulator_disable(regs[i].reg);
248 }
249 }
250
/*
 * Prepare and enable @count clocks.  On failure, the clocks already
 * enabled are rolled back and the error is returned.
 */
static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc = 0;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc)
			break;
	}

	if (!rc)
		return 0;

	dev_err(dev, "Clock enable failed\n");

	/* Roll back the clocks enabled before the failure */
	while (--i >= 0)
		clk_disable_unprepare(clks[i]);

	return rc;
}
272
/* Disable and unprepare @count clocks, in array order. */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	while (count-- > 0)
		clk_disable_unprepare(*clks++);
}
281
/*
 * The MBA firmware carries no resource table, but the rproc core insists
 * on one; hand back a static, empty v1 table.  Note the table is shared
 * by all instances (it is never written beyond initialization).
 */
static struct resource_table *q6v5_find_rsc_table(struct rproc *rproc,
						  const struct firmware *fw,
						  int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);
	return &table;
}
291
q6v5_load(struct rproc * rproc,const struct firmware * fw)292 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
293 {
294 struct q6v5 *qproc = rproc->priv;
295
296 memcpy(qproc->mba_region, fw->data, fw->size);
297
298 return 0;
299 }
300
/* Custom firmware ops: flat-blob load, synthetic (empty) resource table. */
static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = q6v5_find_rsc_table,
	.load = q6v5_load,
};
305
q6v5_rmb_pbl_wait(struct q6v5 * qproc,int ms)306 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
307 {
308 unsigned long timeout;
309 s32 val;
310
311 timeout = jiffies + msecs_to_jiffies(ms);
312 for (;;) {
313 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
314 if (val)
315 break;
316
317 if (time_after(jiffies, timeout))
318 return -ETIMEDOUT;
319
320 msleep(1);
321 }
322
323 return val;
324 }
325
/*
 * Poll the MBA status register for at most @ms milliseconds.  With
 * @status == 0 any non-zero value completes the wait; otherwise only
 * the exact @status does.  Negative register values (MBA error codes)
 * are returned immediately, -ETIMEDOUT on expiry.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);
	s32 val;

	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			return val;

		if (status ? (u32)val == status : val != 0)
			return val;

		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;

		msleep(1);
	}
}
351
/*
 * Run the QDSP6SSv5 power-up sequence to release the Hexagon core from
 * reset, then wait for PBL (the on-chip primary boot loader) to report
 * its status through the RMB registers.
 *
 * The register write order below follows the hardware bring-up sequence;
 * do not reorder.
 *
 * Returns 0 when PBL reports RMB_PBL_SUCCESS, negative errno otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;

	/* Assert resets, stop core */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}
415
/*
 * Halt one of the modem subsystem's AXI bus ports via the halt syscon:
 * request the halt, poll for the ack within HALT_ACK_TIMEOUT_MS, then
 * verify the port reports idle.  Failure is only logged — the caller is
 * tearing the subsystem down regardless.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
449
/*
 * Hand the modem firmware header blob ("modem.mdt": ELF headers plus
 * hash segment) to the MBA for authentication via the RMB registers.
 *
 * The modem reads the blob by physical address, so the bounce buffer
 * must be physically contiguous (DMA_ATTR_FORCE_CONTIGUOUS).
 *
 * Returns 0 on success, negative errno on allocation or authentication
 * failure.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Point the MBA at the header and ask it to authenticate */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}
478
q6v5_phdr_valid(const struct elf32_phdr * phdr)479 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
480 {
481 if (phdr->p_type != PT_LOAD)
482 return false;
483
484 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
485 return false;
486
487 if (!phdr->p_memsz)
488 return false;
489
490 return true;
491 }
492
/*
 * Load and authenticate the modem (mpss) firmware: authenticate the
 * header via the MBA, then copy each loadable segment into the mpss
 * carveout while streaming the cumulative code length to the RMB so the
 * MBA can validate segments as they arrive.
 *
 * Returns 0 on success, negative errno on load/authentication failure.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	/* Authenticate the firmware header before touching segments */
	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the image's physical span and relocatability */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/* Relocatable images are rebased to the start of the carveout */
	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;

	/* Second pass: copy segments and report progress to the MBA */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			/* Each segment ships as a separate modem.bNN file */
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero-fill bss-style tail beyond the file contents */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}

		/* First segment: tell the MBA where code starts */
		size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!size) {
			boot_addr = relocate ? qproc->mpss_phys : min_addr;
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}

		/* Grow the validated-code length as segments land */
		size += phdr->p_memsz;
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}
601
/*
 * rproc "start" op: power up proxy and active resources, release the
 * MSS reset, boot the MBA via the QDSP6 reset sequence, then load and
 * authenticate the modem firmware and wait for the handover interrupt.
 *
 * Proxy supplies/clocks are votes held only until the modem takes over
 * its own resource management (signalled by handover), at which point
 * they are dropped again.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		return ret;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}
	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Tell PBL where the MBA image was loaded */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto halt_axi_ports;

	/* Wait for the MBA to report it is up (any non-zero status) */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto halt_axi_ports;

	/* Handover IRQ completes start_done once the modem is self-hosting */
	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	}

	qproc->running = true;

	/* Drop the proxy votes now that the modem manages its resources */
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);

	return ret;
}
700
/*
 * rproc "stop" op: request a shutdown via the smem "stop" state bit and
 * wait (up to 5s) for the stop-ack, then halt the AXI ports, assert the
 * MSS reset and drop the active clocks and regulators.
 *
 * Always returns 0 — a missing ack is only logged and the teardown is
 * forced regardless.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	/* Cleared first so the wdog handler treats a late wdog as the ack */
	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	reset_control_assert(qproc->mss_restart);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	return 0;
}
730
/*
 * Translate a modem device address into a kernel virtual address inside
 * the mapped mpss region (used by the rproc core, e.g. for coredumps).
 * Returns NULL when [da, da+len) falls outside the region.
 *
 * NOTE(review): da and the offset are narrowed into a signed int —
 * assumes the mpss carveout is well under 2 GiB; confirm for larger
 * carveouts.
 */
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}
742
/* rproc lifecycle operations for the Hexagon core */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};
748
/*
 * Watchdog IRQ handler.  During a requested shutdown the modem may fire
 * a watchdog instead of a stop-ack, so when we are no longer "running"
 * treat it as the ack.  Otherwise log the crash reason published in
 * SMEM (if any) and report the crash so the rproc core can recover.
 */
static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	/* Sometimes the stop triggers a watchdog rather than a stop-ack */
	if (!qproc->running) {
		complete(&qproc->stop_done);
		return IRQ_HANDLED;
	}

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "watchdog received: %s\n", msg);
	else
		dev_err(qproc->dev, "watchdog without message\n");

	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);

	/* Clear the reason so a stale message isn't reported next time */
	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}
774
q6v5_fatal_interrupt(int irq,void * dev)775 static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
776 {
777 struct q6v5 *qproc = dev;
778 size_t len;
779 char *msg;
780
781 msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
782 if (!IS_ERR(msg) && len > 0 && msg[0])
783 dev_err(qproc->dev, "fatal error received: %s\n", msg);
784 else
785 dev_err(qproc->dev, "fatal error without message\n");
786
787 rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
788
789 if (!IS_ERR(msg))
790 msg[0] = '\0';
791
792 return IRQ_HANDLED;
793 }
794
/* Handover IRQ: the modem now manages its own resources; boot is done. */
static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);
	return IRQ_HANDLED;
}
802
/* Stop-ack IRQ: the modem acknowledged the smem "stop" request. */
static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);
	return IRQ_HANDLED;
}
810
/*
 * Map the "qdsp6" and "rmb" register regions and resolve the
 * "qcom,halt-regs" property: a syscon phandle followed by the three
 * per-port (q6, modem, nc) halt-register offsets.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}
845
q6v5_init_clocks(struct device * dev,struct clk ** clks,char ** clk_names)846 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
847 char **clk_names)
848 {
849 int i;
850
851 if (!clk_names)
852 return 0;
853
854 for (i = 0; clk_names[i]; i++) {
855 clks[i] = devm_clk_get(dev, clk_names[i]);
856 if (IS_ERR(clks[i])) {
857 int rc = PTR_ERR(clks[i]);
858
859 if (rc != -EPROBE_DEFER)
860 dev_err(dev, "Failed to get %s clock\n",
861 clk_names[i]);
862 return rc;
863 }
864 }
865
866 return i;
867 }
868
/* Acquire the (sole, unnamed) MSS restart reset line. */
static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}
880
/*
 * Look up the IRQ named @name on @pdev and install @thread_fn as its
 * threaded handler (rising-edge triggered, oneshot, no hard handler).
 * Returns 0 on success, negative errno on failure.
 */
static int q6v5_request_irq(struct q6v5 *qproc,
			    struct platform_device *pdev,
			    const char *name,
			    irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	/* ret holds the IRQ number here; it is reused for the request result */
	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"q6v5", qproc);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}
903
q6v5_alloc_memory_region(struct q6v5 * qproc)904 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
905 {
906 struct device_node *child;
907 struct device_node *node;
908 struct resource r;
909 int ret;
910
911 child = of_get_child_by_name(qproc->dev->of_node, "mba");
912 node = of_parse_phandle(child, "memory-region", 0);
913 ret = of_address_to_resource(node, 0, &r);
914 if (ret) {
915 dev_err(qproc->dev, "unable to resolve mba region\n");
916 return ret;
917 }
918 of_node_put(node);
919
920 qproc->mba_phys = r.start;
921 qproc->mba_size = resource_size(&r);
922 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
923 if (!qproc->mba_region) {
924 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
925 &r.start, qproc->mba_size);
926 return -EBUSY;
927 }
928
929 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
930 node = of_parse_phandle(child, "memory-region", 0);
931 ret = of_address_to_resource(node, 0, &r);
932 if (ret) {
933 dev_err(qproc->dev, "unable to resolve mpss region\n");
934 return ret;
935 }
936 of_node_put(node);
937
938 qproc->mpss_phys = qproc->mpss_reloc = r.start;
939 qproc->mpss_size = resource_size(&r);
940 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
941 if (!qproc->mpss_region) {
942 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
943 &r.start, qproc->mpss_size);
944 return -EBUSY;
945 }
946
947 return 0;
948 }
949
/*
 * Probe: allocate the rproc, map registers and carveouts, acquire the
 * clocks, regulators, reset line, IRQs and the smem "stop" state,
 * register the SMD and SSR subdevices, and finally add the rproc.
 *
 * The acquired resources are devm-managed, so the unified error path
 * only needs to free the rproc itself.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    desc->hexagon_mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	/* MBA needs flat-blob loading and a synthetic resource table */
	rproc->fw_ops = &q6v5_fw_ops;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}

	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}
1058
/* Remove: delete the rproc, tear down the subdevices, free the rproc. */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);

	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
	rproc_free(qproc->rproc);

	return 0;
}
1071
/* MSM8916 modem subsystem: proxy supplies only, single-file MBA image. */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
};
1100
/* MSM8974 modem subsystem: adds an active "mss" supply, split MBA image. */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
};
1137
/* Match table; "qcom,q6v5-pil" is the legacy compatible (msm8916 data). */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ },
};
1144 MODULE_DEVICE_TABLE(of, q6v5_of_match);
1145
/* Platform driver registration */
static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
1154 module_platform_driver(q6v5_driver);
1155
1156 MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
1157 MODULE_LICENSE("GPL v2");
1158