/*
 * Qualcomm Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "remoteproc_internal.h"
#include "qcom_mdt_loader.h"

#include <linux/qcom_scm.h>

#define MBA_FIRMWARE_NAME		"mba.b00"
#define MPSS_FIRMWARE_NAME		"modem.mdt"

#define MPSS_CRASH_REASON_SMEM		421

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;

	struct reset_control *mss_restart;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct regulator_bulk_data supply[4];

	struct clk *ahb_clk;
	struct clk *axi_clk;
	struct clk *rom_clk;

	struct completion start_done;
	struct completion stop_done;
	bool running;

	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;
};

enum {
	Q6V5_SUPPLY_CX,
	Q6V5_SUPPLY_MX,
	Q6V5_SUPPLY_MSS,
	Q6V5_SUPPLY_PLL,
};

static int q6v5_regulator_init(struct q6v5 *qproc)
{
	int ret;

	qproc->supply[Q6V5_SUPPLY_CX].supply = "cx";
	qproc->supply[Q6V5_SUPPLY_MX].supply = "mx";
	qproc->supply[Q6V5_SUPPLY_MSS].supply = "mss";
	qproc->supply[Q6V5_SUPPLY_PLL].supply = "pll";

	ret = devm_regulator_bulk_get(qproc->dev,
				      ARRAY_SIZE(qproc->supply), qproc->supply);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to get supplies\n");
		return ret;
	}

	regulator_set_load(qproc->supply[Q6V5_SUPPLY_CX].consumer, 100000);
	regulator_set_load(qproc->supply[Q6V5_SUPPLY_MSS].consumer, 100000);
	regulator_set_load(qproc->supply[Q6V5_SUPPLY_PLL].consumer, 10000);

	return 0;
}

static int q6v5_regulator_enable(struct q6v5 *qproc)
{
	struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer;
	struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer;
	int ret;

	/* TODO: Q6V5_SUPPLY_CX is supposed to be set to super-turbo here */

	ret = regulator_set_voltage(mx, 1050000, INT_MAX);
	if (ret)
		return ret;

	regulator_set_voltage(mss, 1000000, 1150000);

	return regulator_bulk_enable(ARRAY_SIZE(qproc->supply), qproc->supply);
}

static void q6v5_regulator_disable(struct q6v5 *qproc)
{
	struct regulator *mss = qproc->supply[Q6V5_SUPPLY_MSS].consumer;
	struct regulator *mx = qproc->supply[Q6V5_SUPPLY_MX].consumer;

	/* TODO: Q6V5_SUPPLY_CX corner votes should be released */

	regulator_bulk_disable(ARRAY_SIZE(qproc->supply), qproc->supply);
	regulator_set_voltage(mx, 0, INT_MAX);
	regulator_set_voltage(mss, 0, 1150000);
}

static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

static const struct rproc_fw_ops q6v5_fw_ops = {
	.find_rsc_table = qcom_mdt_find_rsc_table,
	.load = q6v5_load,
};

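/*
 * Poll the PBL (primary boot loader) status register until it reports a
 * result or @ms milliseconds have elapsed. Returns the raw status value
 * read from RMB_PBL_STATUS_REG, or -ETIMEDOUT.
 */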
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

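/*
 * Poll the MBA status register until it reports @status, or any non-zero
 * value when @status is 0, or a negative (error) value, or until @ms
 * milliseconds have elapsed. Returns the last status read, or -ETIMEDOUT.
 */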
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

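/*
 * Power up and release the Hexagon core: assert the resets, switch on the
 * power block head switch, bring up the memories bank by bank, remove the
 * IO clamp, deassert the core reset, enable the core clock and clear the
 * stop bit, then wait for the PBL to report that it booted the MBA image
 * whose address the caller programmed into RMB_MBA_IMAGE_REG.
 */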
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;

	/* Assert resets, stop core */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

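/*
 * Request a halt of the AXI port described by @offset within @halt_map and
 * wait up to HALT_ACK_TIMEOUT_MS for the acknowledgment; the port remains
 * halted after the request bit is cleared, until the next reset.
 */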
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

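/*
 * Hand the MPSS metadata (the mdt headers and hash segment) to the MBA for
 * authentication: copy the blob into a physically contiguous DMA buffer,
 * write its address to RMB_PMI_META_DATA_REG, issue the META_DATA_READY
 * command and wait for the MBA to report RMB_MBA_META_DATA_AUTH_SUCCESS.
 */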
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	return ret < 0 ? ret : 0;
}

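/*
 * Advertise the loaded MPSS segments to the MBA for validation: program the
 * boot address once, grow RMB_PMI_CODE_LENGTH_REG as each loadable non-hash
 * segment is accounted for, then wait for RMB_MBA_AUTH_COMPLETE.
 */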
static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	struct elf32_hdr *ehdr;
	phys_addr_t boot_addr;
	phys_addr_t fw_addr;
	bool relocate;
	size_t size;
	int ret;
	int i;

	ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
	if (ret) {
		dev_err(qproc->dev, "failed to parse mdt header\n");
		return ret;
	}

	if (relocate)
		boot_addr = qproc->mpss_phys;
	else
		boot_addr = fw_addr;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (phdr->p_type != PT_LOAD)
			continue;

		if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
			continue;

		if (!phdr->p_memsz)
			continue;

		size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!size) {
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}

		size += phdr->p_memsz;
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

	return ret < 0 ? ret : 0;
}

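/*
 * Load the MPSS firmware: parse the mdt header to learn whether the image
 * is relocatable, reset the RMB validator, authenticate the metadata, load
 * the segments into the mpss region and have the MBA validate the result.
 */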
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct firmware *fw;
	phys_addr_t fw_addr;
	bool relocate;
	int ret;

	ret = request_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n");
		return ret;
	}

	ret = qcom_mdt_parse(fw, &fw_addr, NULL, &relocate);
	if (ret) {
		dev_err(qproc->dev, "failed to parse mdt header\n");
		goto release_firmware;
	}

	if (relocate)
		qproc->mpss_reloc = fw_addr;

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME);
	if (ret)
		goto release_firmware;

	ret = q6v5_mpss_validate(qproc, fw);

release_firmware:
	release_firmware(fw);

	return ret < 0 ? ret : 0;
}

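/*
 * Boot the modem: enable the supplies and clocks, release the mss restart
 * reset, point the RMB at the MBA image and bring the Hexagon core up. Once
 * the MBA reports it is running, load and authenticate the MPSS firmware and
 * wait for the "handover" interrupt, which completes start_done.
 */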
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	ret = q6v5_regulator_enable(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		return ret;
	}

	ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = clk_prepare_enable(qproc->ahb_clk);
	if (ret)
		goto assert_reset;

	ret = clk_prepare_enable(qproc->axi_clk);
	if (ret)
		goto disable_ahb_clk;

	ret = clk_prepare_enable(qproc->rom_clk);
	if (ret)
		goto disable_axi_clk;

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto halt_axi_ports;

	ret = wait_for_completion_timeout(&qproc->start_done,
					  msecs_to_jiffies(5000));
	if (ret == 0) {
		dev_err(qproc->dev, "start timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	}

	qproc->running = true;

	/* TODO: All done, release the handover resources */

	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	clk_disable_unprepare(qproc->rom_clk);
disable_axi_clk:
	clk_disable_unprepare(qproc->axi_clk);
disable_ahb_clk:
	clk_disable_unprepare(qproc->ahb_clk);
assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
	q6v5_regulator_disable(qproc);

	return ret;
}

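/*
 * Shut the modem down: raise the "stop" bit in the SMEM state, wait for the
 * stop-ack (or watchdog) interrupt, halt the Q6, modem and NC AXI ports and
 * assert the mss restart reset before dropping clocks and supplies.
 */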
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	qcom_smem_state_update_bits(qproc->state,
				    BIT(qproc->stop_bit), BIT(qproc->stop_bit));

	ret = wait_for_completion_timeout(&qproc->stop_done,
					  msecs_to_jiffies(5000));
	if (ret == 0)
		dev_err(qproc->dev, "timed out on wait\n");

	qcom_smem_state_update_bits(qproc->state, BIT(qproc->stop_bit), 0);

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

	reset_control_assert(qproc->mss_restart);
	clk_disable_unprepare(qproc->rom_clk);
	clk_disable_unprepare(qproc->axi_clk);
	clk_disable_unprepare(qproc->ahb_clk);
	q6v5_regulator_disable(qproc);

	return 0;
}

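/*
 * Translate a modem device address into a kernel virtual address, based on
 * its offset into the (possibly relocated) mpss region.
 */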
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
};

static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	/* Sometimes the stop triggers a watchdog rather than a stop-ack */
	if (!qproc->running) {
		complete(&qproc->stop_done);
		return IRQ_HANDLED;
	}

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "watchdog received: %s\n", msg);
	else
		dev_err(qproc->dev, "watchdog without message\n");

	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(qproc->dev, "fatal error received: %s\n", msg);
	else
		dev_err(qproc->dev, "fatal error without message\n");

	rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);

	if (!IS_ERR(msg))
		msg[0] = '\0';

	return IRQ_HANDLED;
}

static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->start_done);
	return IRQ_HANDLED;
}

static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
{
	struct q6v5 *qproc = dev;

	complete(&qproc->stop_done);
	return IRQ_HANDLED;
}

static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}

static int q6v5_init_clocks(struct q6v5 *qproc)
{
	qproc->ahb_clk = devm_clk_get(qproc->dev, "iface");
	if (IS_ERR(qproc->ahb_clk)) {
		dev_err(qproc->dev, "failed to get iface clock\n");
		return PTR_ERR(qproc->ahb_clk);
	}

	qproc->axi_clk = devm_clk_get(qproc->dev, "bus");
	if (IS_ERR(qproc->axi_clk)) {
		dev_err(qproc->dev, "failed to get bus clock\n");
		return PTR_ERR(qproc->axi_clk);
	}

	qproc->rom_clk = devm_clk_get(qproc->dev, "mem");
	if (IS_ERR(qproc->rom_clk)) {
		dev_err(qproc->dev, "failed to get mem clock\n");
		return PTR_ERR(qproc->rom_clk);
	}

	return 0;
}

static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL);
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	return 0;
}

static int q6v5_request_irq(struct q6v5 *qproc,
			    struct platform_device *pdev,
			    const char *name,
			    irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"q6v5", qproc);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}

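/*
 * Resolve the "mba" and "mpss" reserved memory regions from the device tree
 * and map them, so the MBA image can be copied into place and the MPSS
 * region can be used for firmware loading and address translation.
 */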
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);
	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
	if (!qproc->mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mba_size);
		return -EBUSY;
	}

	child = of_get_child_by_name(qproc->dev->of_node, "mpss");
	node = of_parse_phandle(child, "memory-region", 0);
	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);
	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
	if (!qproc->mpss_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, qproc->mpss_size);
		return -EBUSY;
	}

	return 0;
}

static int q6v5_probe(struct platform_device *pdev)
{
	struct q6v5 *qproc;
	struct rproc *rproc;
	int ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    MBA_FIRMWARE_NAME, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->fw_ops = &q6v5_fw_ops;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	platform_set_drvdata(pdev, qproc);

	init_completion(&qproc->start_done);
	init_completion(&qproc->stop_done);

	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_regulator_init(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_reset(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
	if (ret < 0)
		goto free_rproc;

	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
	if (ret < 0)
		goto free_rproc;

	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
	if (IS_ERR(qproc->state)) {
		ret = PTR_ERR(qproc->state);
		goto free_rproc;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto free_rproc;

	return 0;

free_rproc:
	rproc_free(rproc);

	return ret;
}

static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);

	rproc_del(qproc->rproc);
	rproc_free(qproc->rproc);

	return 0;
}

static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", },
	{ },
};

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-pil",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Peripheral Image Loader for Hexagon");
MODULE_LICENSE("GPL v2");