// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk/tegra.h>
#include <linux/genalloc.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/semaphore.h>
#include <linux/sched/clock.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include <soc/tegra/ivc.h>

#include "bpmp-private.h"

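/*
 * Flags carried in the channel message header: MSG_ACK asks the receiver to
 * post a response, MSG_RING asks that the doorbell be rung (or the waiter be
 * woken) once that response has been posted.
 */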
#define MSG_ACK		BIT(0)
#define MSG_RING	BIT(1)
#define TAG_SZ		32

static inline struct tegra_bpmp *
mbox_client_to_bpmp(struct mbox_client *client)
{
	return container_of(client, struct tegra_bpmp, mbox.client);
}

static inline const struct tegra_bpmp_ops *
channel_to_ops(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;

	return bpmp->soc->ops;
}

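/**
 * tegra_bpmp_get() - obtain a reference to the BPMP referenced by the
 *	"nvidia,bpmp" phandle of a consumer device
 * @dev: consumer device
 *
 * Returns a pointer to the BPMP instance on success or an ERR_PTR() on
 * failure (-EPROBE_DEFER if the BPMP has not finished probing yet). Drop
 * the reference with tegra_bpmp_put().
 */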
struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
{
	struct platform_device *pdev;
	struct tegra_bpmp *bpmp;
	struct device_node *np;

	np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
	if (!np)
		return ERR_PTR(-ENOENT);

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		bpmp = ERR_PTR(-ENODEV);
		goto put;
	}

	bpmp = platform_get_drvdata(pdev);
	if (!bpmp) {
		bpmp = ERR_PTR(-EPROBE_DEFER);
		put_device(&pdev->dev);
		goto put;
	}

put:
	of_node_put(np);
	return bpmp;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_get);

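/**
 * tegra_bpmp_put() - release a reference obtained from tegra_bpmp_get()
 * @bpmp: BPMP instance, may be NULL
 */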
void tegra_bpmp_put(struct tegra_bpmp *bpmp)
{
	if (bpmp)
		put_device(bpmp->dev);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_put);

static int
tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned int count;
	int index;

	count = bpmp->soc->channels.thread.count;

	index = channel - channel->bpmp->threaded_channels;
	if (index < 0 || index >= count)
		return -EINVAL;

	return index;
}

static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
{
	return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->rx.size <= MSG_DATA_MIN_SZ) &&
	       (msg->tx.size == 0 || msg->tx.data) &&
	       (msg->rx.size == 0 || msg->rx.data);
}

static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_response_ready(channel);
}

static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_request_ready(channel);
}

static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t end;

	end = ktime_add_us(ktime_get(), timeout);

	do {
		if (tegra_bpmp_is_response_ready(channel))
			return 0;
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}

static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->ack_response(channel);
}

static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->ack_request(channel);
}

static bool
tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_request_channel_free(channel);
}

static bool
tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->is_response_channel_free(channel);
}

static int
tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel)
{
	unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
	ktime_t start, now;

	start = ns_to_ktime(local_clock());

	do {
		if (tegra_bpmp_is_request_channel_free(channel))
			return 0;

		now = ns_to_ktime(local_clock());
	} while (ktime_us_delta(now, start) < timeout);

	return -ETIMEDOUT;
}

static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->post_request(channel);
}

static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel)
{
	const struct tegra_bpmp_ops *ops = channel_to_ops(channel);

	return ops->post_response(channel);
}

static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
{
	return bpmp->soc->ops->ring_doorbell(bpmp);
}

static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
					 void *data, size_t size, int *ret)
{
	int err;

	if (data && size > 0)
		tegra_bpmp_mb_read(data, &channel->ib, size);

	err = tegra_bpmp_ack_response(channel);
	if (err < 0)
		return err;

	*ret = tegra_bpmp_mb_read_field(&channel->ib, code);

	return 0;
}

static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
				       void *data, size_t size, int *ret)
{
	struct tegra_bpmp *bpmp = channel->bpmp;
	unsigned long flags;
	ssize_t err;
	int index;

	index = tegra_bpmp_channel_get_thread_index(channel);
	if (index < 0) {
		err = index;
		goto unlock;
	}

	spin_lock_irqsave(&bpmp->lock, flags);
	err = __tegra_bpmp_channel_read(channel, data, size, ret);
	clear_bit(index, bpmp->threaded.allocated);
	spin_unlock_irqrestore(&bpmp->lock, flags);

unlock:
	up(&bpmp->threaded.lock);

	return err;
}

static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					  unsigned int mrq, unsigned long flags,
					  const void *data, size_t size)
{
	tegra_bpmp_mb_write_field(&channel->ob, code, mrq);
	tegra_bpmp_mb_write_field(&channel->ob, flags, flags);

	if (data && size > 0)
		tegra_bpmp_mb_write(&channel->ob, data, size);

	return tegra_bpmp_post_request(channel);
}

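/*
 * Claim a free threaded channel (bounded by the threaded.lock semaphore and
 * the allocated bitmap), write the request into it and mark it busy. The
 * caller rings the doorbell and then sleeps until tegra_bpmp_handle_rx()
 * completes the channel.
 */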
static struct tegra_bpmp_channel *
tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
			  const void *data, size_t size)
{
	unsigned long timeout = bpmp->soc->channels.thread.timeout;
	unsigned int count = bpmp->soc->channels.thread.count;
	struct tegra_bpmp_channel *channel;
	unsigned long flags;
	unsigned int index;
	int err;

	err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
	if (err < 0)
		return ERR_PTR(err);

	spin_lock_irqsave(&bpmp->lock, flags);

	index = find_first_zero_bit(bpmp->threaded.allocated, count);
	if (index == count) {
		err = -EBUSY;
		goto unlock;
	}

	channel = &bpmp->threaded_channels[index];

	if (!tegra_bpmp_is_request_channel_free(channel)) {
		err = -EBUSY;
		goto unlock;
	}

	set_bit(index, bpmp->threaded.allocated);

	err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
					 data, size);
	if (err < 0)
		goto clear_allocated;

	set_bit(index, bpmp->threaded.busy);

	spin_unlock_irqrestore(&bpmp->lock, flags);
	return channel;

clear_allocated:
	clear_bit(index, bpmp->threaded.allocated);
unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
	up(&bpmp->threaded.lock);

	return ERR_PTR(err);
}

static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
					unsigned int mrq, unsigned long flags,
					const void *data, size_t size)
{
	int err;

	err = tegra_bpmp_wait_request_channel_free(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
}

static int __maybe_unused tegra_bpmp_resume(struct device *dev);

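/**
 * tegra_bpmp_transfer_atomic() - send a message to the BPMP and busy-wait
 *	for the response
 * @bpmp: BPMP instance
 * @msg: message to send; the response is written back into @msg->rx
 *
 * Uses the dedicated atomic TX channel and polls for the response rather
 * than sleeping, so it must be called with interrupts disabled. Returns 0
 * on success or a negative error code on failure.
 */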
int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
			       struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	int err;

	if (WARN_ON(!irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	if (bpmp->suspended) {
		/* Reset BPMP IPC channels during resume based on flags passed */
		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
			tegra_bpmp_resume(bpmp->dev);
		else
			return -EAGAIN;
	}

	channel = bpmp->tx_channel;

	spin_lock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
				       msg->tx.data, msg->tx.size);
	if (err < 0) {
		spin_unlock(&bpmp->atomic_tx_lock);
		return err;
	}

	spin_unlock(&bpmp->atomic_tx_lock);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_wait_response(channel);
	if (err < 0)
		return err;

	return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
					 &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);

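/**
 * tegra_bpmp_transfer() - send a message to the BPMP over a threaded channel
 *	and sleep until the response arrives
 * @bpmp: BPMP instance
 * @msg: message to send; the response is written back into @msg->rx
 *
 * Must not be called from atomic context. Returns 0 on success, -ETIMEDOUT
 * if the BPMP does not respond within the channel timeout, or another
 * negative error code on failure. Callers should check both the return
 * value and the firmware return code in @msg->rx.ret, roughly:
 *
 *	struct tegra_bpmp_message msg = {
 *		.mrq = ...,
 *		.tx = { .data = &request, .size = sizeof(request) },
 *		.rx = { .data = &response, .size = sizeof(response) },
 *	};
 *
 *	err = tegra_bpmp_transfer(bpmp, &msg);
 *	if (err < 0 || msg.rx.ret < 0)
 *		// handle failure
 */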
int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
			struct tegra_bpmp_message *msg)
{
	struct tegra_bpmp_channel *channel;
	unsigned long timeout;
	int err;

	if (WARN_ON(irqs_disabled()))
		return -EPERM;

	if (!tegra_bpmp_message_valid(msg))
		return -EINVAL;

	if (bpmp->suspended) {
		/* Reset BPMP IPC channels during resume based on flags passed */
		if (msg->flags & TEGRA_BPMP_MESSAGE_RESET)
			tegra_bpmp_resume(bpmp->dev);
		else
			return -EAGAIN;
	}

	channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
					    msg->tx.size);
	if (IS_ERR(channel))
		return PTR_ERR(channel);

	err = tegra_bpmp_ring_doorbell(bpmp);
	if (err < 0)
		return err;

	timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);

	err = wait_for_completion_timeout(&channel->completion, timeout);
	if (err == 0)
		return -ETIMEDOUT;

	return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size,
				       &msg->rx.ret);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);

static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
						  unsigned int mrq)
{
	struct tegra_bpmp_mrq *entry;

	list_for_each_entry(entry, &bpmp->mrqs, list)
		if (entry->mrq == mrq)
			return entry;

	return NULL;
}

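/**
 * tegra_bpmp_mrq_return() - post a response to a request received from the
 *	BPMP
 * @channel: channel the request was received on
 * @code: return code to send back
 * @data: optional response payload
 * @size: size of the response payload in bytes
 *
 * Acknowledges the inbound request and, if the sender set MSG_ACK, writes
 * the response to the outbound area and rings the doorbell when MSG_RING
 * is also set.
 */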
void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code,
			   const void *data, size_t size)
{
	unsigned long flags = tegra_bpmp_mb_read_field(&channel->ib, flags);
	struct tegra_bpmp *bpmp = channel->bpmp;
	int err;

	if (WARN_ON(size > MSG_DATA_MIN_SZ))
		return;

	err = tegra_bpmp_ack_request(channel);
	if (WARN_ON(err < 0))
		return;

	if ((flags & MSG_ACK) == 0)
		return;

	if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel)))
		return;

	tegra_bpmp_mb_write_field(&channel->ob, code, code);

	if (data && size > 0)
		tegra_bpmp_mb_write(&channel->ob, data, size);

	err = tegra_bpmp_post_response(channel);
	if (WARN_ON(err < 0))
		return;

	if (flags & MSG_RING) {
		err = tegra_bpmp_ring_doorbell(bpmp);
		if (WARN_ON(err < 0))
			return;
	}
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return);

static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
				  unsigned int mrq,
				  struct tegra_bpmp_channel *channel)
{
	struct tegra_bpmp_mrq *entry;
	u32 zero = 0;

	spin_lock(&bpmp->lock);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry) {
		spin_unlock(&bpmp->lock);
		tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
		return;
	}

	entry->handler(mrq, channel, entry->data);

	spin_unlock(&bpmp->lock);
}

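/**
 * tegra_bpmp_request_mrq() - register a handler for requests (MRQs) sent by
 *	the BPMP to the CPU
 * @bpmp: BPMP instance
 * @mrq: MRQ number to handle
 * @handler: callback invoked from tegra_bpmp_handle_mrq()
 * @data: opaque pointer passed to @handler
 *
 * Returns 0 on success or a negative error code on failure.
 */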
int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
			   tegra_bpmp_mrq_handler_t handler, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	if (!handler)
		return -EINVAL;

	entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry->mrq = mrq;
	entry->handler = handler;
	entry->data = data;
	list_add(&entry->list, &bpmp->mrqs);

	spin_unlock_irqrestore(&bpmp->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);

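/**
 * tegra_bpmp_free_mrq() - unregister a handler added with
 *	tegra_bpmp_request_mrq()
 * @bpmp: BPMP instance
 * @mrq: MRQ number the handler was registered for
 * @data: cookie that was passed at registration time
 */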
void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
{
	struct tegra_bpmp_mrq *entry;
	unsigned long flags;

	spin_lock_irqsave(&bpmp->lock, flags);

	entry = tegra_bpmp_find_mrq(bpmp, mrq);
	if (!entry)
		goto unlock;

	list_del(&entry->list);
	devm_kfree(bpmp->dev, entry);

unlock:
	spin_unlock_irqrestore(&bpmp->lock, flags);
}
EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);

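/**
 * tegra_bpmp_mrq_is_supported() - ask the BPMP firmware whether it
 *	implements a given MRQ
 * @bpmp: BPMP instance
 * @mrq: MRQ number to query
 *
 * Uses MRQ_QUERY_ABI and returns true only if the query succeeds and the
 * firmware reports the MRQ as supported.
 */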
bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp *bpmp, unsigned int mrq)
{
	struct mrq_query_abi_request req = { .mrq = mrq };
	struct mrq_query_abi_response resp;
	struct tegra_bpmp_message msg = {
		.mrq = MRQ_QUERY_ABI,
		.tx = {
			.data = &req,
			.size = sizeof(req),
		},
		.rx = {
			.data = &resp,
			.size = sizeof(resp),
		},
	};
	int err;

	err = tegra_bpmp_transfer(bpmp, &msg);
	if (err || msg.rx.ret)
		return false;

	return resp.status == 0;
}
EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported);

static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
				       struct tegra_bpmp_channel *channel,
				       void *data)
{
	struct mrq_ping_request request;
	struct mrq_ping_response response;

	tegra_bpmp_mb_read(&request, &channel->ib, sizeof(request));

	memset(&response, 0, sizeof(response));
	response.reply = request.challenge << 1;

	tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
}

static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
{
	struct mrq_ping_response response;
	struct mrq_ping_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	ktime_t start, end;
	int err;

	memset(&request, 0, sizeof(request));
	request.challenge = 1;

	memset(&response, 0, sizeof(response));

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_PING;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);
	msg.rx.data = &response;
	msg.rx.size = sizeof(response);

	local_irq_save(flags);
	start = ktime_get();
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	end = ktime_get();
	local_irq_restore(flags);

	if (!err)
		dev_dbg(bpmp->dev,
			"ping ok: challenge: %u, response: %u, time: %lld\n",
			request.challenge, response.reply,
			ktime_to_us(ktime_sub(end, start)));

	return err;
}

/* deprecated version of tag query */
static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp *bpmp, char *tag,
					   size_t size)
{
	struct mrq_query_tag_request request;
	struct tegra_bpmp_message msg;
	unsigned long flags;
	dma_addr_t phys;
	void *virt;
	int err;

	if (size != TAG_SZ)
		return -EINVAL;

	virt = dma_alloc_coherent(bpmp->dev, TAG_SZ, &phys,
				  GFP_KERNEL | GFP_DMA32);
	if (!virt)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));
	request.addr = phys;

	memset(&msg, 0, sizeof(msg));
	msg.mrq = MRQ_QUERY_TAG;
	msg.tx.data = &request;
	msg.tx.size = sizeof(request);

	local_irq_save(flags);
	err = tegra_bpmp_transfer_atomic(bpmp, &msg);
	local_irq_restore(flags);

	if (err == 0)
		memcpy(tag, virt, TAG_SZ);

	dma_free_coherent(bpmp->dev, TAG_SZ, virt, phys);

	return err;
}

static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
				       size_t size)
{
	if (tegra_bpmp_mrq_is_supported(bpmp, MRQ_QUERY_FW_TAG)) {
		struct mrq_query_fw_tag_response resp;
		struct tegra_bpmp_message msg = {
			.mrq = MRQ_QUERY_FW_TAG,
			.rx = {
				.data = &resp,
				.size = sizeof(resp),
			},
		};
		int err;

		if (size != sizeof(resp.tag))
			return -EINVAL;

		err = tegra_bpmp_transfer(bpmp, &msg);

		if (err)
			return err;
		if (msg.rx.ret < 0)
			return -EINVAL;

		memcpy(tag, resp.tag, sizeof(resp.tag));
		return 0;
	}

	return tegra_bpmp_get_firmware_tag_old(bpmp, tag, size);
}

static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
{
	unsigned long flags = tegra_bpmp_mb_read_field(&channel->ob, flags);

	if ((flags & MSG_RING) == 0)
		return;

	complete(&channel->completion);
}

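/*
 * Called by the SoC-specific transport code when the BPMP signals the CPU:
 * dispatch any pending inbound request on the RX channel and complete the
 * threaded channels whose responses have arrived.
 */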
void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp)
{
	struct tegra_bpmp_channel *channel;
	unsigned int i, count;
	unsigned long *busy;

	channel = bpmp->rx_channel;
	count = bpmp->soc->channels.thread.count;
	busy = bpmp->threaded.busy;

	if (tegra_bpmp_is_request_ready(channel)) {
		unsigned int mrq = tegra_bpmp_mb_read_field(&channel->ib, code);

		tegra_bpmp_handle_mrq(bpmp, mrq, channel);
	}

	spin_lock(&bpmp->lock);

	for_each_set_bit(i, busy, count) {
		struct tegra_bpmp_channel *channel;

		channel = &bpmp->threaded_channels[i];

		if (tegra_bpmp_is_response_ready(channel)) {
			tegra_bpmp_channel_signal(channel);
			clear_bit(i, busy);
		}
	}

	spin_unlock(&bpmp->lock);
}

static int tegra_bpmp_probe(struct platform_device *pdev)
{
	struct tegra_bpmp *bpmp;
	char tag[TAG_SZ];
	size_t size;
	int err;

	bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
	if (!bpmp)
		return -ENOMEM;

	bpmp->soc = of_device_get_match_data(&pdev->dev);
	bpmp->dev = &pdev->dev;

	INIT_LIST_HEAD(&bpmp->mrqs);
	spin_lock_init(&bpmp->lock);

	bpmp->threaded.count = bpmp->soc->channels.thread.count;
	sema_init(&bpmp->threaded.lock, bpmp->threaded.count);

	size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);

	bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.allocated)
		return -ENOMEM;

	bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!bpmp->threaded.busy)
		return -ENOMEM;

	spin_lock_init(&bpmp->atomic_tx_lock);
	bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel),
					GFP_KERNEL);
	if (!bpmp->tx_channel)
		return -ENOMEM;

	bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel),
					GFP_KERNEL);
	if (!bpmp->rx_channel)
		return -ENOMEM;

	bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count,
					       sizeof(*bpmp->threaded_channels),
					       GFP_KERNEL);
	if (!bpmp->threaded_channels)
		return -ENOMEM;

	platform_set_drvdata(pdev, bpmp);

	err = bpmp->soc->ops->init(bpmp);
	if (err < 0)
		return err;

	err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
				     tegra_bpmp_mrq_handle_ping, bpmp);
	if (err < 0)
		goto deinit;

	err = tegra_bpmp_ping(bpmp);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
		goto free_mrq;
	}

	err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag));
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
		goto free_mrq;
	}

	dev_info(&pdev->dev, "firmware: %.*s\n", (int)sizeof(tag), tag);

	err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
	if (err < 0)
		goto free_mrq;

	if (of_property_present(pdev->dev.of_node, "#clock-cells")) {
		err = tegra_bpmp_init_clocks(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_property_present(pdev->dev.of_node, "#reset-cells")) {
		err = tegra_bpmp_init_resets(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	if (of_property_present(pdev->dev.of_node, "#power-domain-cells")) {
		err = tegra_bpmp_init_powergates(bpmp);
		if (err < 0)
			goto free_mrq;
	}

	err = tegra_bpmp_init_debugfs(bpmp);
	if (err < 0)
		dev_err(&pdev->dev, "debugfs initialization failed: %d\n", err);

	return 0;

free_mrq:
	tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
deinit:
	if (bpmp->soc->ops->deinit)
		bpmp->soc->ops->deinit(bpmp);

	return err;
}

static int __maybe_unused tegra_bpmp_suspend(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);

	bpmp->suspended = true;

	return 0;
}

static int __maybe_unused tegra_bpmp_resume(struct device *dev)
{
	struct tegra_bpmp *bpmp = dev_get_drvdata(dev);

	bpmp->suspended = false;

	if (bpmp->soc->ops->resume)
		return bpmp->soc->ops->resume(bpmp);
	else
		return 0;
}

static const struct dev_pm_ops tegra_bpmp_pm_ops = {
	.suspend_noirq = tegra_bpmp_suspend,
	.resume_noirq = tegra_bpmp_resume,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const struct tegra_bpmp_soc tegra186_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 3,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 0,
			.count = 3,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 13,
			.timeout = 0,
		},
	},
	.ops = &tegra186_bpmp_ops,
	.num_resets = 193,
};
#endif

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
static const struct tegra_bpmp_soc tegra210_soc = {
	.channels = {
		.cpu_tx = {
			.offset = 0,
			.count = 1,
			.timeout = 60 * USEC_PER_SEC,
		},
		.thread = {
			.offset = 4,
			.count = 1,
			.timeout = 600 * USEC_PER_SEC,
		},
		.cpu_rx = {
			.offset = 8,
			.count = 1,
			.timeout = 0,
		},
	},
	.ops = &tegra210_bpmp_ops,
};
#endif

static const struct of_device_id tegra_bpmp_match[] = {
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
    IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
	{ .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
	{ .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc },
#endif
	{ }
};

static struct platform_driver tegra_bpmp_driver = {
	.driver = {
		.name = "tegra-bpmp",
		.of_match_table = tegra_bpmp_match,
		.pm = &tegra_bpmp_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_bpmp_probe,
};
builtin_platform_driver(tegra_bpmp_driver);