// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 DSP Remote Processor(s) driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 *	Suman Anna <s-anna@ti.com>
 */
8
9 #include <linux/io.h>
10 #include <linux/mailbox_client.h>
11 #include <linux/module.h>
12 #include <linux/of.h>
13 #include <linux/of_reserved_mem.h>
14 #include <linux/omap-mailbox.h>
15 #include <linux/platform_device.h>
16 #include <linux/remoteproc.h>
17 #include <linux/reset.h>
18 #include <linux/slab.h>
19
20 #include "omap_remoteproc.h"
21 #include "remoteproc_internal.h"
22 #include "ti_sci_proc.h"
23
24 #define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
25
/**
 * struct k3_dsp_mem - internal memory structure
 * @cpu_addr: MPU virtual address of the memory region (ioremap_wc mapping)
 * @bus_addr: Bus address used to access the memory region
 * @dev_addr: Device address of the memory region from DSP view
 * @size: Size of the memory region
 */
struct k3_dsp_mem {
	void __iomem *cpu_addr;
	phys_addr_t bus_addr;
	u32 dev_addr;
	size_t size;
};
39
/**
 * struct k3_dsp_mem_data - memory definitions for a DSP
 * @name: name for this memory entry (matches a reg-names entry in DT)
 * @dev_addr: device address for the memory entry
 */
struct k3_dsp_mem_data {
	const char *name;
	const u32 dev_addr;
};
49
/**
 * struct k3_dsp_dev_data - device data structure for a DSP
 * @mems: pointer to memory definitions for a DSP
 * @num_mems: number of memory regions in @mems
 * @boot_align_addr: boot vector address alignment granularity
 * @uses_lreset: flag to denote the need for local reset management (set for
 *		 the C66x cores, which have both a local and a module reset)
 */
struct k3_dsp_dev_data {
	const struct k3_dsp_mem_data *mems;
	u32 num_mems;
	u32 boot_align_addr;
	bool uses_lreset;
};
63
/**
 * struct k3_dsp_rproc - k3 DSP remote processor driver structure
 * @dev: cached device pointer
 * @rproc: remoteproc device handle
 * @mem: internal memory regions data
 * @num_mems: number of internal memory regions
 * @rmem: reserved memory regions data (DDR carveouts)
 * @num_rmems: number of reserved memory regions
 * @reset: reset control handle
 * @data: pointer to DSP-specific device data
 * @tsp: TI-SCI processor control handle
 * @ti_sci: TI-SCI handle
 * @ti_sci_id: TI-SCI device identifier
 * @mbox: mailbox channel handle
 * @client: mailbox client to request the mailbox channel
 */
struct k3_dsp_rproc {
	struct device *dev;
	struct rproc *rproc;
	struct k3_dsp_mem *mem;
	int num_mems;
	struct k3_dsp_mem *rmem;
	int num_rmems;
	struct reset_control *reset;
	const struct k3_dsp_dev_data *data;
	struct ti_sci_proc *tsp;
	const struct ti_sci_handle *ti_sci;
	u32 ti_sci_id;
	struct mbox_chan *mbox;
	struct mbox_client client;
};
95
96 /**
97 * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
98 * @client: mailbox client pointer used for requesting the mailbox channel
99 * @data: mailbox payload
100 *
101 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
102 * message is received. Usually, the mailbox payload simply contains
103 * the index of the virtqueue that is kicked by the remote processor,
104 * and we let remoteproc core handle it.
105 *
106 * In addition to virtqueue indices, we also have some out-of-band values
107 * that indicate different events. Those values are deliberately very
108 * large so they don't coincide with virtqueue indices.
109 */
k3_dsp_rproc_mbox_callback(struct mbox_client * client,void * data)110 static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
111 {
112 struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
113 client);
114 struct device *dev = kproc->rproc->dev.parent;
115 const char *name = kproc->rproc->name;
116 u32 msg = omap_mbox_message(data);
117
118 dev_dbg(dev, "mbox msg: 0x%x\n", msg);
119
120 switch (msg) {
121 case RP_MBOX_CRASH:
122 /*
123 * remoteproc detected an exception, but error recovery is not
124 * supported. So, just log this for now
125 */
126 dev_err(dev, "K3 DSP rproc %s crashed\n", name);
127 break;
128 case RP_MBOX_ECHO_REPLY:
129 dev_info(dev, "received echo reply from %s\n", name);
130 break;
131 default:
132 /* silently handle all other valid messages */
133 if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
134 return;
135 if (msg > kproc->rproc->max_notifyid) {
136 dev_dbg(dev, "dropping unknown message 0x%x", msg);
137 return;
138 }
139 /* msg contains the index of the triggered vring */
140 if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
141 dev_dbg(dev, "no message was found in vqid %d\n", msg);
142 }
143 }
144
145 /*
146 * Kick the remote processor to notify about pending unprocessed messages.
147 * The vqid usage is not used and is inconsequential, as the kick is performed
148 * through a simulated GPIO (a bit in an IPC interrupt-triggering register),
149 * the remote processor is expected to process both its Tx and Rx virtqueues.
150 */
k3_dsp_rproc_kick(struct rproc * rproc,int vqid)151 static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
152 {
153 struct k3_dsp_rproc *kproc = rproc->priv;
154 struct device *dev = rproc->dev.parent;
155 mbox_msg_t msg = (mbox_msg_t)vqid;
156 int ret;
157
158 /* send the index of the triggered virtqueue in the mailbox payload */
159 ret = mbox_send_message(kproc->mbox, (void *)msg);
160 if (ret < 0)
161 dev_err(dev, "failed to send mailbox message (%pe)\n",
162 ERR_PTR(ret));
163 }
164
165 /* Put the DSP processor into reset */
k3_dsp_rproc_reset(struct k3_dsp_rproc * kproc)166 static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
167 {
168 struct device *dev = kproc->dev;
169 int ret;
170
171 ret = reset_control_assert(kproc->reset);
172 if (ret) {
173 dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
174 return ret;
175 }
176
177 if (kproc->data->uses_lreset)
178 return ret;
179
180 ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
181 kproc->ti_sci_id);
182 if (ret) {
183 dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
184 if (reset_control_deassert(kproc->reset))
185 dev_warn(dev, "local-reset deassert back failed\n");
186 }
187
188 return ret;
189 }
190
191 /* Release the DSP processor from reset */
k3_dsp_rproc_release(struct k3_dsp_rproc * kproc)192 static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
193 {
194 struct device *dev = kproc->dev;
195 int ret;
196
197 if (kproc->data->uses_lreset)
198 goto lreset;
199
200 ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
201 kproc->ti_sci_id);
202 if (ret) {
203 dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
204 return ret;
205 }
206
207 lreset:
208 ret = reset_control_deassert(kproc->reset);
209 if (ret) {
210 dev_err(dev, "local-reset deassert failed, (%pe)\n", ERR_PTR(ret));
211 if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
212 kproc->ti_sci_id))
213 dev_warn(dev, "module-reset assert back failed\n");
214 }
215
216 return ret;
217 }
218
k3_dsp_rproc_request_mbox(struct rproc * rproc)219 static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
220 {
221 struct k3_dsp_rproc *kproc = rproc->priv;
222 struct mbox_client *client = &kproc->client;
223 struct device *dev = kproc->dev;
224 int ret;
225
226 client->dev = dev;
227 client->tx_done = NULL;
228 client->rx_callback = k3_dsp_rproc_mbox_callback;
229 client->tx_block = false;
230 client->knows_txdone = false;
231
232 kproc->mbox = mbox_request_channel(client, 0);
233 if (IS_ERR(kproc->mbox))
234 return dev_err_probe(dev, PTR_ERR(kproc->mbox),
235 "mbox_request_channel failed\n");
236
237 /*
238 * Ping the remote processor, this is only for sanity-sake for now;
239 * there is no functional effect whatsoever.
240 *
241 * Note that the reply will _not_ arrive immediately: this message
242 * will wait in the mailbox fifo until the remote processor is booted.
243 */
244 ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
245 if (ret < 0) {
246 dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
247 mbox_free_channel(kproc->mbox);
248 return ret;
249 }
250
251 return 0;
252 }
253 /*
254 * The C66x DSP cores have a local reset that affects only the CPU, and a
255 * generic module reset that powers on the device and allows the DSP internal
256 * memories to be accessed while the local reset is asserted. This function is
257 * used to release the global reset on C66x DSPs to allow loading into the DSP
258 * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
259 * firmware loading, and is followed by the .start() ops after loading to
260 * actually let the C66x DSP cores run. This callback is invoked only in
261 * remoteproc mode.
262 */
k3_dsp_rproc_prepare(struct rproc * rproc)263 static int k3_dsp_rproc_prepare(struct rproc *rproc)
264 {
265 struct k3_dsp_rproc *kproc = rproc->priv;
266 struct device *dev = kproc->dev;
267 int ret;
268
269 ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
270 kproc->ti_sci_id);
271 if (ret)
272 dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading (%pe)\n",
273 ERR_PTR(ret));
274
275 return ret;
276 }
277
278 /*
279 * This function implements the .unprepare() ops and performs the complimentary
280 * operations to that of the .prepare() ops. The function is used to assert the
281 * global reset on applicable C66x cores. This completes the second portion of
282 * powering down the C66x DSP cores. The cores themselves are only halted in the
283 * .stop() callback through the local reset, and the .unprepare() ops is invoked
284 * by the remoteproc core after the remoteproc is stopped to balance the global
285 * reset. This callback is invoked only in remoteproc mode.
286 */
k3_dsp_rproc_unprepare(struct rproc * rproc)287 static int k3_dsp_rproc_unprepare(struct rproc *rproc)
288 {
289 struct k3_dsp_rproc *kproc = rproc->priv;
290 struct device *dev = kproc->dev;
291 int ret;
292
293 ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
294 kproc->ti_sci_id);
295 if (ret)
296 dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
297
298 return ret;
299 }
300
301 /*
302 * Power up the DSP remote processor.
303 *
304 * This function will be invoked only after the firmware for this rproc
305 * was loaded, parsed successfully, and all of its resource requirements
306 * were met. This callback is invoked only in remoteproc mode.
307 */
k3_dsp_rproc_start(struct rproc * rproc)308 static int k3_dsp_rproc_start(struct rproc *rproc)
309 {
310 struct k3_dsp_rproc *kproc = rproc->priv;
311 struct device *dev = kproc->dev;
312 u32 boot_addr;
313 int ret;
314
315 boot_addr = rproc->bootaddr;
316 if (boot_addr & (kproc->data->boot_align_addr - 1)) {
317 dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
318 boot_addr, kproc->data->boot_align_addr);
319 return -EINVAL;
320 }
321
322 dev_dbg(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
323 ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
324 if (ret)
325 return ret;
326
327 ret = k3_dsp_rproc_release(kproc);
328 if (ret)
329 return ret;
330
331 return 0;
332 }
333
/*
 * Stop the DSP remote processor.
 *
 * This function puts the DSP processor into reset, and finishes processing
 * of any pending messages. This callback is invoked only in remoteproc mode.
 *
 * Note: the return value of k3_dsp_rproc_reset() is ignored here and the
 * callback always reports success; reset failures are already logged by
 * the helper itself.
 */
static int k3_dsp_rproc_stop(struct rproc *rproc)
{
	struct k3_dsp_rproc *kproc = rproc->priv;

	k3_dsp_rproc_reset(kproc);

	return 0;
}
348
/*
 * Attach to a running DSP remote processor (IPC-only mode)
 *
 * This rproc attach callback is a NOP. The remote processor is already booted,
 * and all required resources have been acquired during probe routine, so there
 * is no need to issue any TI-SCI commands to boot the DSP core. This callback
 * is invoked only in IPC-only mode and exists because rproc_validate() checks
 * for its existence.
 */
static int k3_dsp_rproc_attach(struct rproc *rproc)
{
	return 0;
}
359
/*
 * Detach from a running DSP remote processor (IPC-only mode)
 *
 * This rproc detach callback is a NOP. The DSP core is not stopped and will be
 * left to continue to run its booted firmware. This callback is invoked only in
 * IPC-only mode and exists for sanity sake.
 */
static int k3_dsp_rproc_detach(struct rproc *rproc)
{
	return 0;
}
368
369 /*
370 * This function implements the .get_loaded_rsc_table() callback and is used
371 * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
372 * firmwares follow a design-by-contract approach and are expected to have the
373 * resource table at the base of the DDR region reserved for firmware usage.
374 * This provides flexibility for the remote processor to be booted by different
375 * bootloaders that may or may not have the ability to publish the resource table
376 * address and size through a DT property. This callback is invoked only in
377 * IPC-only mode.
378 */
k3_dsp_get_loaded_rsc_table(struct rproc * rproc,size_t * rsc_table_sz)379 static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
380 size_t *rsc_table_sz)
381 {
382 struct k3_dsp_rproc *kproc = rproc->priv;
383 struct device *dev = kproc->dev;
384
385 if (!kproc->rmem[0].cpu_addr) {
386 dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
387 return ERR_PTR(-ENOMEM);
388 }
389
390 /*
391 * NOTE: The resource table size is currently hard-coded to a maximum
392 * of 256 bytes. The most common resource table usage for K3 firmwares
393 * is to only have the vdev resource entry and an optional trace entry.
394 * The exact size could be computed based on resource table address, but
395 * the hard-coded value suffices to support the IPC-only mode.
396 */
397 *rsc_table_sz = 256;
398 return (struct resource_table *)kproc->rmem[0].cpu_addr;
399 }
400
401 /*
402 * Custom function to translate a DSP device address (internal RAMs only) to a
403 * kernel virtual address. The DSPs can access their RAMs at either an internal
404 * address visible only from a DSP, or at the SoC-level bus address. Both these
405 * addresses need to be looked through for translation. The translated addresses
406 * can be used either by the remoteproc core for loading (when using kernel
407 * remoteproc loader), or by any rpmsg bus drivers.
408 */
k3_dsp_rproc_da_to_va(struct rproc * rproc,u64 da,size_t len,bool * is_iomem)409 static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
410 {
411 struct k3_dsp_rproc *kproc = rproc->priv;
412 void __iomem *va = NULL;
413 phys_addr_t bus_addr;
414 u32 dev_addr, offset;
415 size_t size;
416 int i;
417
418 if (len == 0)
419 return NULL;
420
421 for (i = 0; i < kproc->num_mems; i++) {
422 bus_addr = kproc->mem[i].bus_addr;
423 dev_addr = kproc->mem[i].dev_addr;
424 size = kproc->mem[i].size;
425
426 if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
427 /* handle DSP-view addresses */
428 if (da >= dev_addr &&
429 ((da + len) <= (dev_addr + size))) {
430 offset = da - dev_addr;
431 va = kproc->mem[i].cpu_addr + offset;
432 return (__force void *)va;
433 }
434 } else {
435 /* handle SoC-view addresses */
436 if (da >= bus_addr &&
437 (da + len) <= (bus_addr + size)) {
438 offset = da - bus_addr;
439 va = kproc->mem[i].cpu_addr + offset;
440 return (__force void *)va;
441 }
442 }
443 }
444
445 /* handle static DDR reserved memory regions */
446 for (i = 0; i < kproc->num_rmems; i++) {
447 dev_addr = kproc->rmem[i].dev_addr;
448 size = kproc->rmem[i].size;
449
450 if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
451 offset = da - dev_addr;
452 va = kproc->rmem[i].cpu_addr + offset;
453 return (__force void *)va;
454 }
455 }
456
457 return NULL;
458 }
459
/*
 * Default remoteproc-mode ops. probe() adds .prepare()/.unprepare() for
 * devices using a local reset, and replaces most of these with attach/detach
 * callbacks when the DSP is detected to be running (IPC-only mode).
 */
static const struct rproc_ops k3_dsp_rproc_ops = {
	.start		= k3_dsp_rproc_start,
	.stop		= k3_dsp_rproc_stop,
	.kick		= k3_dsp_rproc_kick,
	.da_to_va	= k3_dsp_rproc_da_to_va,
};
466
k3_dsp_rproc_of_get_memories(struct platform_device * pdev,struct k3_dsp_rproc * kproc)467 static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
468 struct k3_dsp_rproc *kproc)
469 {
470 const struct k3_dsp_dev_data *data = kproc->data;
471 struct device *dev = &pdev->dev;
472 struct resource *res;
473 int num_mems = 0;
474 int i;
475
476 num_mems = kproc->data->num_mems;
477 kproc->mem = devm_kcalloc(kproc->dev, num_mems,
478 sizeof(*kproc->mem), GFP_KERNEL);
479 if (!kproc->mem)
480 return -ENOMEM;
481
482 for (i = 0; i < num_mems; i++) {
483 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
484 data->mems[i].name);
485 if (!res) {
486 dev_err(dev, "found no memory resource for %s\n",
487 data->mems[i].name);
488 return -EINVAL;
489 }
490 if (!devm_request_mem_region(dev, res->start,
491 resource_size(res),
492 dev_name(dev))) {
493 dev_err(dev, "could not request %s region for resource\n",
494 data->mems[i].name);
495 return -EBUSY;
496 }
497
498 kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
499 resource_size(res));
500 if (!kproc->mem[i].cpu_addr) {
501 dev_err(dev, "failed to map %s memory\n",
502 data->mems[i].name);
503 return -ENOMEM;
504 }
505 kproc->mem[i].bus_addr = res->start;
506 kproc->mem[i].dev_addr = data->mems[i].dev_addr;
507 kproc->mem[i].size = resource_size(res);
508
509 dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
510 data->mems[i].name, &kproc->mem[i].bus_addr,
511 kproc->mem[i].size, kproc->mem[i].cpu_addr,
512 kproc->mem[i].dev_addr);
513 }
514 kproc->num_mems = num_mems;
515
516 return 0;
517 }
518
/* devm action: undo of_reserved_mem_device_init_by_idx() on driver teardown */
static void k3_dsp_mem_release(void *data)
{
	of_reserved_mem_device_release((struct device *)data);
}
525
k3_dsp_reserved_mem_init(struct k3_dsp_rproc * kproc)526 static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
527 {
528 struct device *dev = kproc->dev;
529 struct device_node *np = dev->of_node;
530 struct device_node *rmem_np;
531 struct reserved_mem *rmem;
532 int num_rmems;
533 int ret, i;
534
535 num_rmems = of_property_count_elems_of_size(np, "memory-region",
536 sizeof(phandle));
537 if (num_rmems < 0) {
538 dev_err(dev, "device does not reserved memory regions (%pe)\n",
539 ERR_PTR(num_rmems));
540 return -EINVAL;
541 }
542 if (num_rmems < 2) {
543 dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
544 num_rmems);
545 return -EINVAL;
546 }
547
548 /* use reserved memory region 0 for vring DMA allocations */
549 ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
550 if (ret) {
551 dev_err(dev, "device cannot initialize DMA pool (%pe)\n",
552 ERR_PTR(ret));
553 return ret;
554 }
555 ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev);
556 if (ret)
557 return ret;
558
559 num_rmems--;
560 kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
561 if (!kproc->rmem)
562 return -ENOMEM;
563
564 /* use remaining reserved memory regions for static carveouts */
565 for (i = 0; i < num_rmems; i++) {
566 rmem_np = of_parse_phandle(np, "memory-region", i + 1);
567 if (!rmem_np)
568 return -EINVAL;
569
570 rmem = of_reserved_mem_lookup(rmem_np);
571 of_node_put(rmem_np);
572 if (!rmem)
573 return -EINVAL;
574
575 kproc->rmem[i].bus_addr = rmem->base;
576 /* 64-bit address regions currently not supported */
577 kproc->rmem[i].dev_addr = (u32)rmem->base;
578 kproc->rmem[i].size = rmem->size;
579 kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
580 if (!kproc->rmem[i].cpu_addr) {
581 dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
582 i + 1, &rmem->base, &rmem->size);
583 return -ENOMEM;
584 }
585
586 dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
587 i + 1, &kproc->rmem[i].bus_addr,
588 kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
589 kproc->rmem[i].dev_addr);
590 }
591 kproc->num_rmems = num_rmems;
592
593 return 0;
594 }
595
/* devm action: release the TI-SCI processor control on driver teardown */
static void k3_dsp_release_tsp(void *data)
{
	ti_sci_proc_release((struct ti_sci_proc *)data);
}
602
k3_dsp_rproc_probe(struct platform_device * pdev)603 static int k3_dsp_rproc_probe(struct platform_device *pdev)
604 {
605 struct device *dev = &pdev->dev;
606 struct device_node *np = dev->of_node;
607 const struct k3_dsp_dev_data *data;
608 struct k3_dsp_rproc *kproc;
609 struct rproc *rproc;
610 const char *fw_name;
611 bool p_state = false;
612 int ret = 0;
613
614 data = of_device_get_match_data(dev);
615 if (!data)
616 return -ENODEV;
617
618 ret = rproc_of_parse_firmware(dev, 0, &fw_name);
619 if (ret)
620 return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
621
622 rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops,
623 fw_name, sizeof(*kproc));
624 if (!rproc)
625 return -ENOMEM;
626
627 rproc->has_iommu = false;
628 rproc->recovery_disabled = true;
629 if (data->uses_lreset) {
630 rproc->ops->prepare = k3_dsp_rproc_prepare;
631 rproc->ops->unprepare = k3_dsp_rproc_unprepare;
632 }
633 kproc = rproc->priv;
634 kproc->rproc = rproc;
635 kproc->dev = dev;
636 kproc->data = data;
637
638 ret = k3_dsp_rproc_request_mbox(rproc);
639 if (ret)
640 return ret;
641
642 kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
643 if (IS_ERR(kproc->ti_sci))
644 return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
645 "failed to get ti-sci handle\n");
646
647 ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
648 if (ret)
649 return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
650
651 kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
652 if (IS_ERR(kproc->reset))
653 return dev_err_probe(dev, PTR_ERR(kproc->reset),
654 "failed to get reset\n");
655
656 kproc->tsp = ti_sci_proc_of_get_tsp(dev, kproc->ti_sci);
657 if (IS_ERR(kproc->tsp))
658 return dev_err_probe(dev, PTR_ERR(kproc->tsp),
659 "failed to construct ti-sci proc control\n");
660
661 ret = ti_sci_proc_request(kproc->tsp);
662 if (ret < 0) {
663 dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
664 return ret;
665 }
666 ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp);
667 if (ret)
668 return ret;
669
670 ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
671 if (ret)
672 return ret;
673
674 ret = k3_dsp_reserved_mem_init(kproc);
675 if (ret)
676 return dev_err_probe(dev, ret, "reserved memory init failed\n");
677
678 ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
679 NULL, &p_state);
680 if (ret)
681 return dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");
682
683 /* configure J721E devices for either remoteproc or IPC-only mode */
684 if (p_state) {
685 dev_info(dev, "configured DSP for IPC-only mode\n");
686 rproc->state = RPROC_DETACHED;
687 /* override rproc ops with only required IPC-only mode ops */
688 rproc->ops->prepare = NULL;
689 rproc->ops->unprepare = NULL;
690 rproc->ops->start = NULL;
691 rproc->ops->stop = NULL;
692 rproc->ops->attach = k3_dsp_rproc_attach;
693 rproc->ops->detach = k3_dsp_rproc_detach;
694 rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
695 } else {
696 dev_info(dev, "configured DSP for remoteproc mode\n");
697 /*
698 * ensure the DSP local reset is asserted to ensure the DSP
699 * doesn't execute bogus code in .prepare() when the module
700 * reset is released.
701 */
702 if (data->uses_lreset) {
703 ret = reset_control_status(kproc->reset);
704 if (ret < 0) {
705 return dev_err_probe(dev, ret, "failed to get reset status\n");
706 } else if (ret == 0) {
707 dev_warn(dev, "local reset is deasserted for device\n");
708 k3_dsp_rproc_reset(kproc);
709 }
710 }
711 }
712
713 ret = devm_rproc_add(dev, rproc);
714 if (ret)
715 return dev_err_probe(dev, ret, "failed to add register device with remoteproc core\n");
716
717 platform_set_drvdata(pdev, kproc);
718
719 return 0;
720 }
721
/*
 * Remove a K3 DSP remote processor device.
 *
 * In IPC-only mode (state == RPROC_ATTACHED) the DSP is left running and the
 * driver merely detaches from it; detach errors are logged but not fatal.
 * The mailbox channel is released in both modes. All other resources are
 * devm-managed and released automatically after this returns.
 */
static void k3_dsp_rproc_remove(struct platform_device *pdev)
{
	struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
	struct rproc *rproc = kproc->rproc;
	struct device *dev = &pdev->dev;
	int ret;

	if (rproc->state == RPROC_ATTACHED) {
		ret = rproc_detach(rproc);
		if (ret)
			dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
	}

	mbox_free_channel(kproc->mbox);
}
737
/* C66x internal RAMs and their DSP-view device addresses */
static const struct k3_dsp_mem_data c66_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
	{ .name = "l1pram", .dev_addr = 0xe00000 },
	{ .name = "l1dram", .dev_addr = 0xf00000 },
};

/* C71x cores only have a L1P Cache, there are no L1P SRAMs */
static const struct k3_dsp_mem_data c71_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
	{ .name = "l1dram", .dev_addr = 0xe00000 },
};

/* C7xv cores expose only their L2 SRAM */
static const struct k3_dsp_mem_data c7xv_mems[] = {
	{ .name = "l2sram", .dev_addr = 0x800000 },
};
753
/* C66x: 1KB-aligned boot vector, uses the local (CPU-only) reset */
static const struct k3_dsp_dev_data c66_data = {
	.mems = c66_mems,
	.num_mems = ARRAY_SIZE(c66_mems),
	.boot_align_addr = SZ_1K,
	.uses_lreset = true,
};

/* C71x: 2MB-aligned boot vector, module reset only */
static const struct k3_dsp_dev_data c71_data = {
	.mems = c71_mems,
	.num_mems = ARRAY_SIZE(c71_mems),
	.boot_align_addr = SZ_2M,
	.uses_lreset = false,
};

/* C7xv: 2MB-aligned boot vector, module reset only */
static const struct k3_dsp_dev_data c7xv_data = {
	.mems = c7xv_mems,
	.num_mems = ARRAY_SIZE(c7xv_mems),
	.boot_align_addr = SZ_2M,
	.uses_lreset = false,
};
774
/* DT compatibles mapped to their per-core device data */
static const struct of_device_id k3_dsp_of_match[] = {
	{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
	{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
	{ .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
	{ .compatible = "ti,am62a-c7xv-dsp", .data = &c7xv_data, },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
783
static struct platform_driver k3_dsp_rproc_driver = {
	.probe	= k3_dsp_rproc_probe,
	.remove_new = k3_dsp_rproc_remove,
	.driver	= {
		.name = "k3-dsp-rproc",
		.of_match_table = k3_dsp_of_match,
	},
};

module_platform_driver(k3_dsp_rproc_driver);

MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
798