1 /*
2 * Texas Instruments System Control Interface Driver
3 * Based on Linux and U-Boot implementation
4 *
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
6 *
7 * SPDX-License-Identifier: BSD-3-Clause
8 */
9
10 #include <errno.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <string.h>
14
15 #include <platform_def.h>
16
17 #include <common/debug.h>
18 #include <sec_proxy.h>
19
20 #include "ti_sci_protocol.h"
21 #include "ti_sci.h"
22
/*
 * Rolling sequence number: stamped into each transmitted header and compared
 * against incoming responses to match a reply to its request.
 * Placed in coherent memory when USE_COHERENT_MEM is set so all observers
 * agree on its value.
 */
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
static uint8_t message_sequence;
27
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message: Transmit message buffer/length handed to the secure proxy
 * @rx_message: Receive message buffer/length filled by the secure proxy
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	struct k3_sec_proxy_msg rx_message;
};
37
38 /**
39 * ti_sci_setup_one_xfer() - Setup one message type
40 *
41 * @msg_type: Message type
42 * @msg_flags: Flag to set for the message
43 * @tx_buf: Buffer to be sent to mailbox channel
44 * @tx_message_size: transmit message size
45 * @rx_buf: Buffer to be received from mailbox channel
46 * @rx_message_size: receive message size
47 *
48 * Helper function which is used by various command functions that are
49 * exposed to clients of this driver for allocating a message traffic event.
50 *
51 * Return: 0 if all goes well, else appropriate error message
52 */
ti_sci_setup_one_xfer(uint16_t msg_type,uint32_t msg_flags,void * tx_buf,size_t tx_message_size,void * rx_buf,size_t rx_message_size,struct ti_sci_xfer * xfer)53 static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags,
54 void *tx_buf,
55 size_t tx_message_size,
56 void *rx_buf,
57 size_t rx_message_size,
58 struct ti_sci_xfer *xfer)
59 {
60 struct ti_sci_msg_hdr *hdr;
61
62 /* Ensure we have sane transfer sizes */
63 if (rx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
64 tx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
65 rx_message_size < sizeof(*hdr) ||
66 tx_message_size < sizeof(*hdr))
67 return -ERANGE;
68
69 hdr = (struct ti_sci_msg_hdr *)tx_buf;
70 hdr->seq = ++message_sequence;
71 hdr->type = msg_type;
72 hdr->host = TI_SCI_HOST_ID;
73 hdr->flags = msg_flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
74
75 xfer->tx_message.buf = tx_buf;
76 xfer->tx_message.len = tx_message_size;
77
78 xfer->rx_message.buf = rx_buf;
79 xfer->rx_message.len = rx_message_size;
80
81 return 0;
82 }
83
84 /**
85 * ti_sci_get_response() - Receive response from mailbox channel
86 *
87 * @xfer: Transfer to initiate and wait for response
88 * @chan: Channel to receive the response
89 *
90 * Return: 0 if all goes well, else appropriate error message
91 */
ti_sci_get_response(struct ti_sci_xfer * xfer,enum k3_sec_proxy_chan_id chan)92 static inline int ti_sci_get_response(struct ti_sci_xfer *xfer,
93 enum k3_sec_proxy_chan_id chan)
94 {
95 struct k3_sec_proxy_msg *msg = &xfer->rx_message;
96 struct ti_sci_msg_hdr *hdr;
97 unsigned int retry = 5;
98 int ret;
99
100 for (; retry > 0; retry--) {
101 /* Receive the response */
102 ret = k3_sec_proxy_recv(chan, msg);
103 if (ret) {
104 ERROR("Message receive failed (%d)\n", ret);
105 return ret;
106 }
107
108 /* msg is updated by Secure Proxy driver */
109 hdr = (struct ti_sci_msg_hdr *)msg->buf;
110
111 /* Sanity check for message response */
112 if (hdr->seq == message_sequence)
113 break;
114 else
115 WARN("Message with sequence ID %u is not expected\n", hdr->seq);
116 }
117 if (!retry) {
118 ERROR("Timed out waiting for message\n");
119 return -EINVAL;
120 }
121
122 if (msg->len > TI_SCI_MAX_MESSAGE_SIZE) {
123 ERROR("Unable to handle %lu xfer (max %d)\n",
124 msg->len, TI_SCI_MAX_MESSAGE_SIZE);
125 return -EINVAL;
126 }
127
128 if (!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
129 return -ENODEV;
130
131 return 0;
132 }
133
134 /**
135 * ti_sci_do_xfer() - Do one transfer
136 *
137 * @xfer: Transfer to initiate and wait for response
138 *
139 * Return: 0 if all goes well, else appropriate error message
140 */
ti_sci_do_xfer(struct ti_sci_xfer * xfer)141 static inline int ti_sci_do_xfer(struct ti_sci_xfer *xfer)
142 {
143 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
144 int ret;
145
146 /* Clear any spurious messages in receive queue */
147 ret = k3_sec_proxy_clear_rx_thread(SP_RESPONSE);
148 if (ret) {
149 ERROR("Could not clear response queue (%d)\n", ret);
150 return ret;
151 }
152
153 /* Send the message */
154 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, msg);
155 if (ret) {
156 ERROR("Message sending failed (%d)\n", ret);
157 return ret;
158 }
159
160 /* Get the response */
161 ret = ti_sci_get_response(xfer, SP_RESPONSE);
162 if (ret) {
163 ERROR("Failed to get response (%d)\n", ret);
164 return ret;
165 }
166
167 return 0;
168 }
169
170 /**
171 * ti_sci_get_revision() - Get the revision of the SCI entity
172 *
173 * Updates the SCI information in the internal data structure.
174 *
175 * Return: 0 if all goes well, else appropriate error message
176 */
ti_sci_get_revision(struct ti_sci_msg_resp_version * rev_info)177 int ti_sci_get_revision(struct ti_sci_msg_resp_version *rev_info)
178 {
179 struct ti_sci_msg_hdr hdr;
180 struct ti_sci_xfer xfer;
181 int ret;
182
183 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0,
184 &hdr, sizeof(hdr),
185 rev_info, sizeof(*rev_info),
186 &xfer);
187 if (ret) {
188 ERROR("Message alloc failed (%d)\n", ret);
189 return ret;
190 }
191
192 ret = ti_sci_do_xfer(&xfer);
193 if (ret) {
194 ERROR("Transfer send failed (%d)\n", ret);
195 return ret;
196 }
197
198 return 0;
199 }
200
201 /**
202 * ti_sci_device_set_state() - Set device state
203 *
204 * @id: Device identifier
205 * @flags: flags to setup for the device
206 * @state: State to move the device to
207 *
208 * Return: 0 if all goes well, else appropriate error message
209 */
ti_sci_device_set_state(uint32_t id,uint32_t flags,uint8_t state)210 static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state)
211 {
212 struct ti_sci_msg_req_set_device_state req;
213 struct ti_sci_msg_hdr resp;
214
215 struct ti_sci_xfer xfer;
216 int ret;
217
218 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, flags,
219 &req, sizeof(req),
220 &resp, sizeof(resp),
221 &xfer);
222 if (ret) {
223 ERROR("Message alloc failed (%d)\n", ret);
224 return ret;
225 }
226
227 req.id = id;
228 req.state = state;
229
230 ret = ti_sci_do_xfer(&xfer);
231 if (ret) {
232 ERROR("Transfer send failed (%d)\n", ret);
233 return ret;
234 }
235
236 return 0;
237 }
238
239 /**
240 * ti_sci_device_get_state() - Get device state
241 *
242 * @id: Device Identifier
243 * @clcnt: Pointer to Context Loss Count
244 * @resets: pointer to resets
245 * @p_state: pointer to p_state
246 * @c_state: pointer to c_state
247 *
248 * Return: 0 if all goes well, else appropriate error message
249 */
ti_sci_device_get_state(uint32_t id,uint32_t * clcnt,uint32_t * resets,uint8_t * p_state,uint8_t * c_state)250 static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt,
251 uint32_t *resets, uint8_t *p_state,
252 uint8_t *c_state)
253 {
254 struct ti_sci_msg_req_get_device_state req;
255 struct ti_sci_msg_resp_get_device_state resp;
256
257 struct ti_sci_xfer xfer;
258 int ret;
259
260 if (!clcnt && !resets && !p_state && !c_state)
261 return -EINVAL;
262
263 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0,
264 &req, sizeof(req),
265 &resp, sizeof(resp),
266 &xfer);
267 if (ret) {
268 ERROR("Message alloc failed (%d)\n", ret);
269 return ret;
270 }
271
272 req.id = id;
273
274 ret = ti_sci_do_xfer(&xfer);
275 if (ret) {
276 ERROR("Transfer send failed (%d)\n", ret);
277 return ret;
278 }
279
280 if (clcnt)
281 *clcnt = resp.context_loss_count;
282 if (resets)
283 *resets = resp.resets;
284 if (p_state)
285 *p_state = resp.programmed_state;
286 if (c_state)
287 *c_state = resp.current_state;
288
289 return 0;
290 }
291
292 /**
293 * ti_sci_device_get() - Request for device managed by TISCI
294 *
295 * @id: Device Identifier
296 *
297 * Request for the device - NOTE: the client MUST maintain integrity of
298 * usage count by balancing get_device with put_device. No refcounting is
299 * managed by driver for that purpose.
300 *
301 * Return: 0 if all goes well, else appropriate error message
302 */
ti_sci_device_get(uint32_t id)303 int ti_sci_device_get(uint32_t id)
304 {
305 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON);
306 }
307
308 /**
309 * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI
310 *
311 * @id: Device Identifier
312 *
313 * Request for the device - NOTE: the client MUST maintain integrity of
314 * usage count by balancing get_device with put_device. No refcounting is
315 * managed by driver for that purpose.
316 *
317 * NOTE: This _exclusive version of the get API is for exclusive access to the
318 * device. Any other host in the system will fail to get this device after this
319 * call until exclusive access is released with device_put or a non-exclusive
320 * set call.
321 *
322 * Return: 0 if all goes well, else appropriate error message
323 */
ti_sci_device_get_exclusive(uint32_t id)324 int ti_sci_device_get_exclusive(uint32_t id)
325 {
326 return ti_sci_device_set_state(id,
327 MSG_FLAG_DEVICE_EXCLUSIVE,
328 MSG_DEVICE_SW_STATE_ON);
329 }
330
331 /**
332 * ti_sci_device_idle() - Idle a device managed by TISCI
333 *
334 * @id: Device Identifier
335 *
336 * Request for the device - NOTE: the client MUST maintain integrity of
337 * usage count by balancing get_device with put_device. No refcounting is
338 * managed by driver for that purpose.
339 *
340 * Return: 0 if all goes well, else appropriate error message
341 */
ti_sci_device_idle(uint32_t id)342 int ti_sci_device_idle(uint32_t id)
343 {
344 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION);
345 }
346
347 /**
348 * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI
349 *
350 * @id: Device Identifier
351 *
352 * Request for the device - NOTE: the client MUST maintain integrity of
353 * usage count by balancing get_device with put_device. No refcounting is
354 * managed by driver for that purpose.
355 *
356 * NOTE: This _exclusive version of the idle API is for exclusive access to
357 * the device. Any other host in the system will fail to get this device after
358 * this call until exclusive access is released with device_put or a
359 * non-exclusive set call.
360 *
361 * Return: 0 if all goes well, else appropriate error message
362 */
ti_sci_device_idle_exclusive(uint32_t id)363 int ti_sci_device_idle_exclusive(uint32_t id)
364 {
365 return ti_sci_device_set_state(id,
366 MSG_FLAG_DEVICE_EXCLUSIVE,
367 MSG_DEVICE_SW_STATE_RETENTION);
368 }
369
370 /**
371 * ti_sci_device_put() - Release a device managed by TISCI
372 *
373 * @id: Device Identifier
374 *
375 * Request for the device - NOTE: the client MUST maintain integrity of
376 * usage count by balancing get_device with put_device. No refcounting is
377 * managed by driver for that purpose.
378 *
379 * Return: 0 if all goes well, else appropriate error message
380 */
ti_sci_device_put(uint32_t id)381 int ti_sci_device_put(uint32_t id)
382 {
383 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
384 }
385
386 /**
387 * ti_sci_device_put_no_wait() - Release a device without requesting or waiting
388 * for a response.
389 *
390 * @id: Device Identifier
391 *
392 * Request for the device - NOTE: the client MUST maintain integrity of
393 * usage count by balancing get_device with put_device. No refcounting is
394 * managed by driver for that purpose.
395 *
396 * Return: 0 if all goes well, else appropriate error message
397 */
ti_sci_device_put_no_wait(uint32_t id)398 int ti_sci_device_put_no_wait(uint32_t id)
399 {
400 struct ti_sci_msg_req_set_device_state req;
401 struct ti_sci_msg_hdr *hdr;
402 struct k3_sec_proxy_msg tx_message;
403 int ret;
404
405 /* Ensure we have sane transfer size */
406 if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE)
407 return -ERANGE;
408
409 hdr = (struct ti_sci_msg_hdr *)&req;
410 hdr->seq = ++message_sequence;
411 hdr->type = TI_SCI_MSG_SET_DEVICE_STATE;
412 hdr->host = TI_SCI_HOST_ID;
413 /* Setup with NORESPONSE flag to keep response queue clean */
414 hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE;
415
416 req.id = id;
417 req.state = MSG_DEVICE_SW_STATE_AUTO_OFF;
418
419 tx_message.buf = (uint8_t *)&req;
420 tx_message.len = sizeof(req);
421
422 /* Send message */
423 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message);
424 if (ret) {
425 ERROR("Message sending failed (%d)\n", ret);
426 return ret;
427 }
428
429 /* Return without waiting for response */
430 return 0;
431 }
432
/**
 * ti_sci_device_is_valid() - Is the device valid
 *
 * @id: Device Identifier
 *
 * Return: 0 if all goes well and the device ID is valid, else return
 * appropriate error
 */
int ti_sci_device_is_valid(uint32_t id)
{
	uint8_t discard;

	/* A state query succeeds only for a valid device ID */
	return ti_sci_device_get_state(id, NULL, NULL, NULL, &discard);
}
448
/**
 * ti_sci_device_get_clcnt() - Get context loss counter
 *
 * @id: Device Identifier
 * @count: Pointer to Context Loss counter to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count)
{
	/* Only the context-loss counter is of interest here */
	return ti_sci_device_get_state(id, count, NULL, NULL, NULL);
}
461
462 /**
463 * ti_sci_device_is_idle() - Check if the device is requested to be idle
464 *
465 * @id: Device Identifier
466 * @r_state: true if requested to be idle
467 *
468 * Return: 0 if all goes well, else appropriate error message
469 */
ti_sci_device_is_idle(uint32_t id,bool * r_state)470 int ti_sci_device_is_idle(uint32_t id, bool *r_state)
471 {
472 int ret;
473 uint8_t state;
474
475 if (!r_state)
476 return -EINVAL;
477
478 ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL);
479 if (ret)
480 return ret;
481
482 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
483
484 return 0;
485 }
486
487 /**
488 * ti_sci_device_is_stop() - Check if the device is requested to be stopped
489 *
490 * @id: Device Identifier
491 * @r_state: true if requested to be stopped
492 * @curr_state: true if currently stopped
493 *
494 * Return: 0 if all goes well, else appropriate error message
495 */
ti_sci_device_is_stop(uint32_t id,bool * r_state,bool * curr_state)496 int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state)
497 {
498 int ret;
499 uint8_t p_state, c_state;
500
501 if (!r_state && !curr_state)
502 return -EINVAL;
503
504 ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
505 if (ret)
506 return ret;
507
508 if (r_state)
509 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
510 if (curr_state)
511 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
512
513 return 0;
514 }
515
516 /**
517 * ti_sci_device_is_on() - Check if the device is requested to be ON
518 *
519 * @id: Device Identifier
520 * @r_state: true if requested to be ON
521 * @curr_state: true if currently ON and active
522 *
523 * Return: 0 if all goes well, else appropriate error message
524 */
ti_sci_device_is_on(uint32_t id,bool * r_state,bool * curr_state)525 int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state)
526 {
527 int ret;
528 uint8_t p_state, c_state;
529
530 if (!r_state && !curr_state)
531 return -EINVAL;
532
533 ret =
534 ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
535 if (ret)
536 return ret;
537
538 if (r_state)
539 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
540 if (curr_state)
541 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
542
543 return 0;
544 }
545
546 /**
547 * ti_sci_device_is_trans() - Check if the device is currently transitioning
548 *
549 * @id: Device Identifier
550 * @curr_state: true if currently transitioning
551 *
552 * Return: 0 if all goes well, else appropriate error message
553 */
ti_sci_device_is_trans(uint32_t id,bool * curr_state)554 int ti_sci_device_is_trans(uint32_t id, bool *curr_state)
555 {
556 int ret;
557 uint8_t state;
558
559 if (!curr_state)
560 return -EINVAL;
561
562 ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state);
563 if (ret)
564 return ret;
565
566 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
567
568 return 0;
569 }
570
571 /**
572 * ti_sci_device_set_resets() - Set resets for device managed by TISCI
573 *
574 * @id: Device Identifier
575 * @reset_state: Device specific reset bit field
576 *
577 * Return: 0 if all goes well, else appropriate error message
578 */
ti_sci_device_set_resets(uint32_t id,uint32_t reset_state)579 int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state)
580 {
581 struct ti_sci_msg_req_set_device_resets req;
582 struct ti_sci_msg_hdr resp;
583
584 struct ti_sci_xfer xfer;
585 int ret;
586
587 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS, 0,
588 &req, sizeof(req),
589 &resp, sizeof(resp),
590 &xfer);
591 if (ret) {
592 ERROR("Message alloc failed (%d)\n", ret);
593 return ret;
594 }
595
596 req.id = id;
597 req.resets = reset_state;
598
599 ret = ti_sci_do_xfer(&xfer);
600 if (ret) {
601 ERROR("Transfer send failed (%d)\n", ret);
602 return ret;
603 }
604
605 return 0;
606 }
607
/**
 * ti_sci_device_get_resets() - Get reset state for device managed by TISCI
 *
 * @id: Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state)
{
	/* Only the reset bit field is of interest here */
	return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL);
}
620
621 /**
622 * ti_sci_clock_set_state() - Set clock state helper
623 *
624 * @dev_id: Device identifier this request is for
625 * @clk_id: Clock identifier for the device for this request,
626 * Each device has its own set of clock inputs, This indexes
627 * which clock input to modify
628 * @flags: Header flags as needed
629 * @state: State to request for the clock
630 *
631 * Return: 0 if all goes well, else appropriate error message
632 */
ti_sci_clock_set_state(uint32_t dev_id,uint8_t clk_id,uint32_t flags,uint8_t state)633 int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id,
634 uint32_t flags, uint8_t state)
635 {
636 struct ti_sci_msg_req_set_clock_state req;
637 struct ti_sci_msg_hdr resp;
638
639 struct ti_sci_xfer xfer;
640 int ret;
641
642 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE, flags,
643 &req, sizeof(req),
644 &resp, sizeof(resp),
645 &xfer);
646 if (ret) {
647 ERROR("Message alloc failed (%d)\n", ret);
648 return ret;
649 }
650
651 req.dev_id = dev_id;
652 req.clk_id = clk_id;
653 req.request_state = state;
654
655 ret = ti_sci_do_xfer(&xfer);
656 if (ret) {
657 ERROR("Transfer send failed (%d)\n", ret);
658 return ret;
659 }
660
661 return 0;
662 }
663
664 /**
665 * ti_sci_clock_get_state() - Get clock state helper
666 *
667 * @dev_id: Device identifier this request is for
668 * @clk_id: Clock identifier for the device for this request.
669 * Each device has its own set of clock inputs. This indexes
670 * which clock input to modify.
671 * @programmed_state: State requested for clock to move to
672 * @current_state: State that the clock is currently in
673 *
674 * Return: 0 if all goes well, else appropriate error message
675 */
ti_sci_clock_get_state(uint32_t dev_id,uint8_t clk_id,uint8_t * programmed_state,uint8_t * current_state)676 int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id,
677 uint8_t *programmed_state,
678 uint8_t *current_state)
679 {
680 struct ti_sci_msg_req_get_clock_state req;
681 struct ti_sci_msg_resp_get_clock_state resp;
682
683 struct ti_sci_xfer xfer;
684 int ret;
685
686 if (!programmed_state && !current_state)
687 return -EINVAL;
688
689 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE, 0,
690 &req, sizeof(req),
691 &resp, sizeof(resp),
692 &xfer);
693 if (ret) {
694 ERROR("Message alloc failed (%d)\n", ret);
695 return ret;
696 }
697
698 req.dev_id = dev_id;
699 req.clk_id = clk_id;
700
701 ret = ti_sci_do_xfer(&xfer);
702 if (ret) {
703 ERROR("Transfer send failed (%d)\n", ret);
704 return ret;
705 }
706
707 if (programmed_state)
708 *programmed_state = resp.programmed_state;
709 if (current_state)
710 *current_state = resp.current_state;
711
712 return 0;
713 }
714
715 /**
716 * ti_sci_clock_get() - Get control of a clock from TI SCI
717
718 * @dev_id: Device identifier this request is for
719 * @clk_id: Clock identifier for the device for this request.
720 * Each device has its own set of clock inputs. This indexes
721 * which clock input to modify.
722 * @needs_ssc: 'true' iff Spread Spectrum clock is desired
723 * @can_change_freq: 'true' iff frequency change is desired
724 * @enable_input_term: 'true' iff input termination is desired
725 *
726 * Return: 0 if all goes well, else appropriate error message
727 */
ti_sci_clock_get(uint32_t dev_id,uint8_t clk_id,bool needs_ssc,bool can_change_freq,bool enable_input_term)728 int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id,
729 bool needs_ssc, bool can_change_freq,
730 bool enable_input_term)
731 {
732 uint32_t flags = 0;
733
734 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
735 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
736 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
737
738 return ti_sci_clock_set_state(dev_id, clk_id, flags,
739 MSG_CLOCK_SW_STATE_REQ);
740 }
741
742 /**
743 * ti_sci_clock_idle() - Idle a clock which is in our control
744
745 * @dev_id: Device identifier this request is for
746 * @clk_id: Clock identifier for the device for this request.
747 * Each device has its own set of clock inputs. This indexes
748 * which clock input to modify.
749 *
750 * NOTE: This clock must have been requested by get_clock previously.
751 *
752 * Return: 0 if all goes well, else appropriate error message
753 */
ti_sci_clock_idle(uint32_t dev_id,uint8_t clk_id)754 int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id)
755 {
756 return ti_sci_clock_set_state(dev_id, clk_id, 0,
757 MSG_CLOCK_SW_STATE_UNREQ);
758 }
759
760 /**
761 * ti_sci_clock_put() - Release a clock from our control
762 *
763 * @dev_id: Device identifier this request is for
764 * @clk_id: Clock identifier for the device for this request.
765 * Each device has its own set of clock inputs. This indexes
766 * which clock input to modify.
767 *
768 * NOTE: This clock must have been requested by get_clock previously.
769 *
770 * Return: 0 if all goes well, else appropriate error message
771 */
ti_sci_clock_put(uint32_t dev_id,uint8_t clk_id)772 int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id)
773 {
774 return ti_sci_clock_set_state(dev_id, clk_id, 0,
775 MSG_CLOCK_SW_STATE_AUTO);
776 }
777
778 /**
779 * ti_sci_clock_is_auto() - Is the clock being auto managed
780 *
781 * @dev_id: Device identifier this request is for
782 * @clk_id: Clock identifier for the device for this request.
783 * Each device has its own set of clock inputs. This indexes
784 * which clock input to modify.
785 * @req_state: state indicating if the clock is auto managed
786 *
787 * Return: 0 if all goes well, else appropriate error message
788 */
ti_sci_clock_is_auto(uint32_t dev_id,uint8_t clk_id,bool * req_state)789 int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state)
790 {
791 uint8_t state = 0;
792 int ret;
793
794 if (!req_state)
795 return -EINVAL;
796
797 ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL);
798 if (ret)
799 return ret;
800
801 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
802
803 return 0;
804 }
805
806 /**
807 * ti_sci_clock_is_on() - Is the clock ON
808 *
809 * @dev_id: Device identifier this request is for
810 * @clk_id: Clock identifier for the device for this request.
811 * Each device has its own set of clock inputs. This indexes
812 * which clock input to modify.
813 * @req_state: state indicating if the clock is managed by us and enabled
814 * @curr_state: state indicating if the clock is ready for operation
815 *
816 * Return: 0 if all goes well, else appropriate error message
817 */
ti_sci_clock_is_on(uint32_t dev_id,uint8_t clk_id,bool * req_state,bool * curr_state)818 int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id,
819 bool *req_state, bool *curr_state)
820 {
821 uint8_t c_state = 0, r_state = 0;
822 int ret;
823
824 if (!req_state && !curr_state)
825 return -EINVAL;
826
827 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
828 if (ret)
829 return ret;
830
831 if (req_state)
832 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
833 if (curr_state)
834 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
835
836 return 0;
837 }
838
839 /**
840 * ti_sci_clock_is_off() - Is the clock OFF
841 *
842 * @dev_id: Device identifier this request is for
843 * @clk_id: Clock identifier for the device for this request.
844 * Each device has its own set of clock inputs. This indexes
845 * which clock input to modify.
846 * @req_state: state indicating if the clock is managed by us and disabled
847 * @curr_state: state indicating if the clock is NOT ready for operation
848 *
849 * Return: 0 if all goes well, else appropriate error message
850 */
ti_sci_clock_is_off(uint32_t dev_id,uint8_t clk_id,bool * req_state,bool * curr_state)851 int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id,
852 bool *req_state, bool *curr_state)
853 {
854 uint8_t c_state = 0, r_state = 0;
855 int ret;
856
857 if (!req_state && !curr_state)
858 return -EINVAL;
859
860 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
861 if (ret)
862 return ret;
863
864 if (req_state)
865 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
866 if (curr_state)
867 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
868
869 return 0;
870 }
871
872 /**
873 * ti_sci_clock_set_parent() - Set the clock source of a specific device clock
874 *
875 * @dev_id: Device identifier this request is for
876 * @clk_id: Clock identifier for the device for this request.
877 * Each device has its own set of clock inputs. This indexes
878 * which clock input to modify.
879 * @parent_id: Parent clock identifier to set
880 *
881 * Return: 0 if all goes well, else appropriate error message
882 */
ti_sci_clock_set_parent(uint32_t dev_id,uint8_t clk_id,uint8_t parent_id)883 int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id)
884 {
885 struct ti_sci_msg_req_set_clock_parent req;
886 struct ti_sci_msg_hdr resp;
887
888 struct ti_sci_xfer xfer;
889 int ret;
890
891 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT, 0,
892 &req, sizeof(req),
893 &resp, sizeof(resp),
894 &xfer);
895 if (ret) {
896 ERROR("Message alloc failed (%d)\n", ret);
897 return ret;
898 }
899
900 req.dev_id = dev_id;
901 req.clk_id = clk_id;
902 req.parent_id = parent_id;
903
904 ret = ti_sci_do_xfer(&xfer);
905 if (ret) {
906 ERROR("Transfer send failed (%d)\n", ret);
907 return ret;
908 }
909
910 return 0;
911 }
912
913 /**
914 * ti_sci_clock_get_parent() - Get current parent clock source
915 *
916 * @dev_id: Device identifier this request is for
917 * @clk_id: Clock identifier for the device for this request.
918 * Each device has its own set of clock inputs. This indexes
919 * which clock input to modify.
920 * @parent_id: Current clock parent
921 *
922 * Return: 0 if all goes well, else appropriate error message
923 */
ti_sci_clock_get_parent(uint32_t dev_id,uint8_t clk_id,uint8_t * parent_id)924 int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id)
925 {
926 struct ti_sci_msg_req_get_clock_parent req;
927 struct ti_sci_msg_resp_get_clock_parent resp;
928
929 struct ti_sci_xfer xfer;
930 int ret;
931
932 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT, 0,
933 &req, sizeof(req),
934 &resp, sizeof(resp),
935 &xfer);
936 if (ret) {
937 ERROR("Message alloc failed (%d)\n", ret);
938 return ret;
939 }
940
941 req.dev_id = dev_id;
942 req.clk_id = clk_id;
943
944 ret = ti_sci_do_xfer(&xfer);
945 if (ret) {
946 ERROR("Transfer send failed (%d)\n", ret);
947 return ret;
948 }
949
950 *parent_id = resp.parent_id;
951
952 return 0;
953 }
954
955 /**
956 * ti_sci_clock_get_num_parents() - Get num parents of the current clk source
957 *
958 * @dev_id: Device identifier this request is for
959 * @clk_id: Clock identifier for the device for this request.
960 * Each device has its own set of clock inputs. This indexes
961 * which clock input to modify.
962 * @num_parents: Returns he number of parents to the current clock.
963 *
964 * Return: 0 if all goes well, else appropriate error message
965 */
ti_sci_clock_get_num_parents(uint32_t dev_id,uint8_t clk_id,uint8_t * num_parents)966 int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id,
967 uint8_t *num_parents)
968 {
969 struct ti_sci_msg_req_get_clock_num_parents req;
970 struct ti_sci_msg_resp_get_clock_num_parents resp;
971
972 struct ti_sci_xfer xfer;
973 int ret;
974
975 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 0,
976 &req, sizeof(req),
977 &resp, sizeof(resp),
978 &xfer);
979 if (ret) {
980 ERROR("Message alloc failed (%d)\n", ret);
981 return ret;
982 }
983
984 req.dev_id = dev_id;
985 req.clk_id = clk_id;
986
987 ret = ti_sci_do_xfer(&xfer);
988 if (ret) {
989 ERROR("Transfer send failed (%d)\n", ret);
990 return ret;
991 }
992
993 *num_parents = resp.num_parents;
994
995 return 0;
996 }
997
998 /**
999 * ti_sci_clock_get_match_freq() - Find a good match for frequency
1000 *
1001 * @dev_id: Device identifier this request is for
1002 * @clk_id: Clock identifier for the device for this request.
1003 * Each device has its own set of clock inputs. This indexes
1004 * which clock input to modify.
1005 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1006 * allowable programmed frequency and does not account for clock
1007 * tolerances and jitter.
1008 * @target_freq: The target clock frequency in Hz. A frequency will be
1009 * processed as close to this target frequency as possible.
1010 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1011 * allowable programmed frequency and does not account for clock
1012 * tolerances and jitter.
1013 * @match_freq: Frequency match in Hz response.
1014 *
1015 * Return: 0 if all goes well, else appropriate error message
1016 */
ti_sci_clock_get_match_freq(uint32_t dev_id,uint8_t clk_id,uint64_t min_freq,uint64_t target_freq,uint64_t max_freq,uint64_t * match_freq)1017 int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id,
1018 uint64_t min_freq, uint64_t target_freq,
1019 uint64_t max_freq, uint64_t *match_freq)
1020 {
1021 struct ti_sci_msg_req_query_clock_freq req;
1022 struct ti_sci_msg_resp_query_clock_freq resp;
1023
1024 struct ti_sci_xfer xfer;
1025 int ret;
1026
1027 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ, 0,
1028 &req, sizeof(req),
1029 &resp, sizeof(resp),
1030 &xfer);
1031 if (ret) {
1032 ERROR("Message alloc failed (%d)\n", ret);
1033 return ret;
1034 }
1035
1036 req.dev_id = dev_id;
1037 req.clk_id = clk_id;
1038 req.min_freq_hz = min_freq;
1039 req.target_freq_hz = target_freq;
1040 req.max_freq_hz = max_freq;
1041
1042 ret = ti_sci_do_xfer(&xfer);
1043 if (ret) {
1044 ERROR("Transfer send failed (%d)\n", ret);
1045 return ret;
1046 }
1047
1048 *match_freq = resp.freq_hz;
1049
1050 return 0;
1051 }
1052
1053 /**
1054 * ti_sci_clock_set_freq() - Set a frequency for clock
1055 *
1056 * @dev_id: Device identifier this request is for
1057 * @clk_id: Clock identifier for the device for this request.
1058 * Each device has its own set of clock inputs. This indexes
1059 * which clock input to modify.
1060 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1061 * allowable programmed frequency and does not account for clock
1062 * tolerances and jitter.
1063 * @target_freq: The target clock frequency in Hz. A frequency will be
1064 * processed as close to this target frequency as possible.
1065 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1066 * allowable programmed frequency and does not account for clock
1067 * tolerances and jitter.
1068 *
1069 * Return: 0 if all goes well, else appropriate error message
1070 */
ti_sci_clock_set_freq(uint32_t dev_id,uint8_t clk_id,uint64_t min_freq,uint64_t target_freq,uint64_t max_freq)1071 int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq,
1072 uint64_t target_freq, uint64_t max_freq)
1073 {
1074 struct ti_sci_msg_req_set_clock_freq req;
1075 struct ti_sci_msg_hdr resp;
1076
1077 struct ti_sci_xfer xfer;
1078 int ret;
1079
1080 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ, 0,
1081 &req, sizeof(req),
1082 &resp, sizeof(resp),
1083 &xfer);
1084 if (ret) {
1085 ERROR("Message alloc failed (%d)\n", ret);
1086 return ret;
1087 }
1088 req.dev_id = dev_id;
1089 req.clk_id = clk_id;
1090 req.min_freq_hz = min_freq;
1091 req.target_freq_hz = target_freq;
1092 req.max_freq_hz = max_freq;
1093
1094 ret = ti_sci_do_xfer(&xfer);
1095 if (ret) {
1096 ERROR("Transfer send failed (%d)\n", ret);
1097 return ret;
1098 }
1099
1100 return 0;
1101 }
1102
1103 /**
1104 * ti_sci_clock_get_freq() - Get current frequency
1105 *
1106 * @dev_id: Device identifier this request is for
1107 * @clk_id: Clock identifier for the device for this request.
1108 * Each device has its own set of clock inputs. This indexes
1109 * which clock input to modify.
1110 * @freq: Currently frequency in Hz
1111 *
1112 * Return: 0 if all goes well, else appropriate error message
1113 */
ti_sci_clock_get_freq(uint32_t dev_id,uint8_t clk_id,uint64_t * freq)1114 int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq)
1115 {
1116 struct ti_sci_msg_req_get_clock_freq req;
1117 struct ti_sci_msg_resp_get_clock_freq resp;
1118
1119 struct ti_sci_xfer xfer;
1120 int ret;
1121
1122 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ, 0,
1123 &req, sizeof(req),
1124 &resp, sizeof(resp),
1125 &xfer);
1126 if (ret) {
1127 ERROR("Message alloc failed (%d)\n", ret);
1128 return ret;
1129 }
1130
1131 req.dev_id = dev_id;
1132 req.clk_id = clk_id;
1133
1134 ret = ti_sci_do_xfer(&xfer);
1135 if (ret) {
1136 ERROR("Transfer send failed (%d)\n", ret);
1137 return ret;
1138 }
1139
1140 *freq = resp.freq_hz;
1141
1142 return 0;
1143 }
1144
1145 /**
1146 * ti_sci_core_reboot() - Command to request system reset
1147 *
1148 * Return: 0 if all goes well, else appropriate error message
1149 */
ti_sci_core_reboot(void)1150 int ti_sci_core_reboot(void)
1151 {
1152 struct ti_sci_msg_req_reboot req;
1153 struct ti_sci_msg_hdr resp;
1154
1155 struct ti_sci_xfer xfer;
1156 int ret;
1157
1158 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET, 0,
1159 &req, sizeof(req),
1160 &resp, sizeof(resp),
1161 &xfer);
1162 if (ret) {
1163 ERROR("Message alloc failed (%d)\n", ret);
1164 return ret;
1165 }
1166
1167 ret = ti_sci_do_xfer(&xfer);
1168 if (ret) {
1169 ERROR("Transfer send failed (%d)\n", ret);
1170 return ret;
1171 }
1172
1173 return 0;
1174 }
1175
1176 /**
1177 * ti_sci_proc_request() - Request a physical processor control
1178 *
1179 * @proc_id: Processor ID this request is for
1180 *
1181 * Return: 0 if all goes well, else appropriate error message
1182 */
ti_sci_proc_request(uint8_t proc_id)1183 int ti_sci_proc_request(uint8_t proc_id)
1184 {
1185 struct ti_sci_msg_req_proc_request req;
1186 struct ti_sci_msg_hdr resp;
1187
1188 struct ti_sci_xfer xfer;
1189 int ret;
1190
1191 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST, 0,
1192 &req, sizeof(req),
1193 &resp, sizeof(resp),
1194 &xfer);
1195 if (ret) {
1196 ERROR("Message alloc failed (%d)\n", ret);
1197 return ret;
1198 }
1199
1200 req.processor_id = proc_id;
1201
1202 ret = ti_sci_do_xfer(&xfer);
1203 if (ret) {
1204 ERROR("Transfer send failed (%d)\n", ret);
1205 return ret;
1206 }
1207
1208 return 0;
1209 }
1210
1211 /**
1212 * ti_sci_proc_release() - Release a physical processor control
1213 *
1214 * @proc_id: Processor ID this request is for
1215 *
1216 * Return: 0 if all goes well, else appropriate error message
1217 */
ti_sci_proc_release(uint8_t proc_id)1218 int ti_sci_proc_release(uint8_t proc_id)
1219 {
1220 struct ti_sci_msg_req_proc_release req;
1221 struct ti_sci_msg_hdr resp;
1222
1223 struct ti_sci_xfer xfer;
1224 int ret;
1225
1226 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE, 0,
1227 &req, sizeof(req),
1228 &resp, sizeof(resp),
1229 &xfer);
1230 if (ret) {
1231 ERROR("Message alloc failed (%d)\n", ret);
1232 return ret;
1233 }
1234
1235 req.processor_id = proc_id;
1236
1237 ret = ti_sci_do_xfer(&xfer);
1238 if (ret) {
1239 ERROR("Transfer send failed (%d)\n", ret);
1240 return ret;
1241 }
1242
1243 return 0;
1244 }
1245
1246 /**
1247 * ti_sci_proc_handover() - Handover a physical processor control to a host in
1248 * the processor's access control list.
1249 *
1250 * @proc_id: Processor ID this request is for
1251 * @host_id: Host ID to get the control of the processor
1252 *
1253 * Return: 0 if all goes well, else appropriate error message
1254 */
ti_sci_proc_handover(uint8_t proc_id,uint8_t host_id)1255 int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id)
1256 {
1257 struct ti_sci_msg_req_proc_handover req;
1258 struct ti_sci_msg_hdr resp;
1259
1260 struct ti_sci_xfer xfer;
1261 int ret;
1262
1263 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER, 0,
1264 &req, sizeof(req),
1265 &resp, sizeof(resp),
1266 &xfer);
1267 if (ret) {
1268 ERROR("Message alloc failed (%d)\n", ret);
1269 return ret;
1270 }
1271
1272 req.processor_id = proc_id;
1273 req.host_id = host_id;
1274
1275 ret = ti_sci_do_xfer(&xfer);
1276 if (ret) {
1277 ERROR("Transfer send failed (%d)\n", ret);
1278 return ret;
1279 }
1280
1281 return 0;
1282 }
1283
1284 /**
1285 * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags
1286 *
1287 * @proc_id: Processor ID this request is for
1288 * @config_flags_set: Configuration flags to be set
1289 * @config_flags_clear: Configuration flags to be cleared
1290 *
1291 * Return: 0 if all goes well, else appropriate error message
1292 */
ti_sci_proc_set_boot_cfg(uint8_t proc_id,uint64_t bootvector,uint32_t config_flags_set,uint32_t config_flags_clear)1293 int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector,
1294 uint32_t config_flags_set,
1295 uint32_t config_flags_clear)
1296 {
1297 struct ti_sci_msg_req_set_proc_boot_config req;
1298 struct ti_sci_msg_hdr resp;
1299
1300 struct ti_sci_xfer xfer;
1301 int ret;
1302
1303 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG, 0,
1304 &req, sizeof(req),
1305 &resp, sizeof(resp),
1306 &xfer);
1307 if (ret) {
1308 ERROR("Message alloc failed (%d)\n", ret);
1309 return ret;
1310 }
1311
1312 req.processor_id = proc_id;
1313 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1314 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1315 TISCI_ADDR_HIGH_SHIFT;
1316 req.config_flags_set = config_flags_set;
1317 req.config_flags_clear = config_flags_clear;
1318
1319 ret = ti_sci_do_xfer(&xfer);
1320 if (ret) {
1321 ERROR("Transfer send failed (%d)\n", ret);
1322 return ret;
1323 }
1324
1325 return 0;
1326 }
1327
1328 /**
1329 * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags
1330 *
1331 * @proc_id: Processor ID this request is for
1332 * @control_flags_set: Control flags to be set
1333 * @control_flags_clear: Control flags to be cleared
1334 *
1335 * Return: 0 if all goes well, else appropriate error message
1336 */
ti_sci_proc_set_boot_ctrl(uint8_t proc_id,uint32_t control_flags_set,uint32_t control_flags_clear)1337 int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set,
1338 uint32_t control_flags_clear)
1339 {
1340 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1341 struct ti_sci_msg_hdr resp;
1342
1343 struct ti_sci_xfer xfer;
1344 int ret;
1345
1346 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1347 &req, sizeof(req),
1348 &resp, sizeof(resp),
1349 &xfer);
1350 if (ret) {
1351 ERROR("Message alloc failed (%d)\n", ret);
1352 return ret;
1353 }
1354
1355 req.processor_id = proc_id;
1356 req.control_flags_set = control_flags_set;
1357 req.control_flags_clear = control_flags_clear;
1358
1359 ret = ti_sci_do_xfer(&xfer);
1360 if (ret) {
1361 ERROR("Transfer send failed (%d)\n", ret);
1362 return ret;
1363 }
1364
1365 return 0;
1366 }
1367
1368 /**
1369 * ti_sci_proc_set_boot_ctrl_no_wait() - Set the processor boot control flags
1370 * without requesting or waiting for a
1371 * response.
1372 *
1373 * @proc_id: Processor ID this request is for
1374 * @control_flags_set: Control flags to be set
1375 * @control_flags_clear: Control flags to be cleared
1376 *
1377 * Return: 0 if all goes well, else appropriate error message
1378 */
ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,uint32_t control_flags_set,uint32_t control_flags_clear)1379 int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,
1380 uint32_t control_flags_set,
1381 uint32_t control_flags_clear)
1382 {
1383 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1384 struct ti_sci_msg_hdr *hdr;
1385 struct k3_sec_proxy_msg tx_message;
1386 int ret;
1387
1388 /* Ensure we have sane transfer size */
1389 if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE)
1390 return -ERANGE;
1391
1392 hdr = (struct ti_sci_msg_hdr *)&req;
1393 hdr->seq = ++message_sequence;
1394 hdr->type = TISCI_MSG_SET_PROC_BOOT_CTRL;
1395 hdr->host = TI_SCI_HOST_ID;
1396 /* Setup with NORESPONSE flag to keep response queue clean */
1397 hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE;
1398
1399 req.processor_id = proc_id;
1400 req.control_flags_set = control_flags_set;
1401 req.control_flags_clear = control_flags_clear;
1402
1403 tx_message.buf = (uint8_t *)&req;
1404 tx_message.len = sizeof(req);
1405
1406 /* Send message */
1407 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message);
1408 if (ret) {
1409 ERROR("Message sending failed (%d)\n", ret);
1410 return ret;
1411 }
1412
1413 /* Return without waiting for response */
1414 return 0;
1415 }
1416
1417 /**
1418 * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the
1419 * processor configuration flags
1420 *
1421 * @proc_id: Processor ID this request is for
1422 * @cert_addr: Memory address at which payload image certificate is located
1423 *
1424 * Return: 0 if all goes well, else appropriate error message
1425 */
ti_sci_proc_auth_boot_image(uint8_t proc_id,uint64_t cert_addr)1426 int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr)
1427 {
1428 struct ti_sci_msg_req_proc_auth_boot_image req;
1429 struct ti_sci_msg_hdr resp;
1430
1431 struct ti_sci_xfer xfer;
1432 int ret;
1433
1434 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMIAGE, 0,
1435 &req, sizeof(req),
1436 &resp, sizeof(resp),
1437 &xfer);
1438 if (ret) {
1439 ERROR("Message alloc failed (%d)\n", ret);
1440 return ret;
1441 }
1442
1443 req.processor_id = proc_id;
1444 req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
1445 req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
1446 TISCI_ADDR_HIGH_SHIFT;
1447
1448 ret = ti_sci_do_xfer(&xfer);
1449 if (ret) {
1450 ERROR("Transfer send failed (%d)\n", ret);
1451 return ret;
1452 }
1453
1454 return 0;
1455 }
1456
1457 /**
1458 * ti_sci_proc_get_boot_status() - Get the processor boot status
1459 *
1460 * @proc_id: Processor ID this request is for
1461 *
1462 * Return: 0 if all goes well, else appropriate error message
1463 */
ti_sci_proc_get_boot_status(uint8_t proc_id,uint64_t * bv,uint32_t * cfg_flags,uint32_t * ctrl_flags,uint32_t * sts_flags)1464 int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv,
1465 uint32_t *cfg_flags,
1466 uint32_t *ctrl_flags,
1467 uint32_t *sts_flags)
1468 {
1469 struct ti_sci_msg_req_get_proc_boot_status req;
1470 struct ti_sci_msg_resp_get_proc_boot_status resp;
1471
1472 struct ti_sci_xfer xfer;
1473 int ret;
1474
1475 ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS, 0,
1476 &req, sizeof(req),
1477 &resp, sizeof(resp),
1478 &xfer);
1479 if (ret) {
1480 ERROR("Message alloc failed (%d)\n", ret);
1481 return ret;
1482 }
1483
1484 req.processor_id = proc_id;
1485
1486 ret = ti_sci_do_xfer(&xfer);
1487 if (ret) {
1488 ERROR("Transfer send failed (%d)\n", ret);
1489 return ret;
1490 }
1491
1492 *bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
1493 (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
1494 TISCI_ADDR_HIGH_MASK);
1495 *cfg_flags = resp.config_flags;
1496 *ctrl_flags = resp.control_flags;
1497 *sts_flags = resp.status_flags;
1498
1499 return 0;
1500 }
1501
1502 /**
1503 * ti_sci_proc_wait_boot_status() - Wait for a processor boot status
1504 *
1505 * @proc_id: Processor ID this request is for
1506 * @num_wait_iterations Total number of iterations we will check before
1507 * we will timeout and give up
1508 * @num_match_iterations How many iterations should we have continued
1509 * status to account for status bits glitching.
1510 * This is to make sure that match occurs for
1511 * consecutive checks. This implies that the
1512 * worst case should consider that the stable
1513 * time should at the worst be num_wait_iterations
1514 * num_match_iterations to prevent timeout.
1515 * @delay_per_iteration_us Specifies how long to wait (in micro seconds)
1516 * between each status checks. This is the minimum
1517 * duration, and overhead of register reads and
1518 * checks are on top of this and can vary based on
1519 * varied conditions.
1520 * @delay_before_iterations_us Specifies how long to wait (in micro seconds)
1521 * before the very first check in the first
1522 * iteration of status check loop. This is the
1523 * minimum duration, and overhead of register
1524 * reads and checks are.
1525 * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the
1526 * status matching this field requested MUST be 1.
1527 * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the
1528 * bits matching this field requested MUST be 1.
1529 * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the
1530 * status matching this field requested MUST be 0.
1531 * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the
1532 * bits matching this field requested MUST be 0.
1533 *
1534 * Return: 0 if all goes well, else appropriate error message
1535 */
ti_sci_proc_wait_boot_status(uint8_t proc_id,uint8_t num_wait_iterations,uint8_t num_match_iterations,uint8_t delay_per_iteration_us,uint8_t delay_before_iterations_us,uint32_t status_flags_1_set_all_wait,uint32_t status_flags_1_set_any_wait,uint32_t status_flags_1_clr_all_wait,uint32_t status_flags_1_clr_any_wait)1536 int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations,
1537 uint8_t num_match_iterations,
1538 uint8_t delay_per_iteration_us,
1539 uint8_t delay_before_iterations_us,
1540 uint32_t status_flags_1_set_all_wait,
1541 uint32_t status_flags_1_set_any_wait,
1542 uint32_t status_flags_1_clr_all_wait,
1543 uint32_t status_flags_1_clr_any_wait)
1544 {
1545 struct ti_sci_msg_req_wait_proc_boot_status req;
1546 struct ti_sci_msg_hdr resp;
1547
1548 struct ti_sci_xfer xfer;
1549 int ret;
1550
1551 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1552 &req, sizeof(req),
1553 &resp, sizeof(resp),
1554 &xfer);
1555 if (ret) {
1556 ERROR("Message alloc failed (%d)\n", ret);
1557 return ret;
1558 }
1559
1560 req.processor_id = proc_id;
1561 req.num_wait_iterations = num_wait_iterations;
1562 req.num_match_iterations = num_match_iterations;
1563 req.delay_per_iteration_us = delay_per_iteration_us;
1564 req.delay_before_iterations_us = delay_before_iterations_us;
1565 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1566 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1567 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1568 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1569
1570 ret = ti_sci_do_xfer(&xfer);
1571 if (ret) {
1572 ERROR("Transfer send failed (%d)\n", ret);
1573 return ret;
1574 }
1575
1576 return 0;
1577 }
1578
1579 /**
1580 * ti_sci_proc_wait_boot_status_no_wait() - Wait for a processor boot status
1581 * without requesting or waiting for
1582 * a response.
1583 *
1584 * @proc_id: Processor ID this request is for
1585 * @num_wait_iterations Total number of iterations we will check before
1586 * we will timeout and give up
1587 * @num_match_iterations How many iterations should we have continued
1588 * status to account for status bits glitching.
1589 * This is to make sure that match occurs for
1590 * consecutive checks. This implies that the
1591 * worst case should consider that the stable
1592 * time should at the worst be num_wait_iterations
1593 * num_match_iterations to prevent timeout.
1594 * @delay_per_iteration_us Specifies how long to wait (in micro seconds)
1595 * between each status checks. This is the minimum
1596 * duration, and overhead of register reads and
1597 * checks are on top of this and can vary based on
1598 * varied conditions.
1599 * @delay_before_iterations_us Specifies how long to wait (in micro seconds)
1600 * before the very first check in the first
1601 * iteration of status check loop. This is the
1602 * minimum duration, and overhead of register
1603 * reads and checks are.
1604 * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the
1605 * status matching this field requested MUST be 1.
1606 * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the
1607 * bits matching this field requested MUST be 1.
1608 * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the
1609 * status matching this field requested MUST be 0.
1610 * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the
1611 * bits matching this field requested MUST be 0.
1612 *
1613 * Return: 0 if all goes well, else appropriate error message
1614 */
ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,uint8_t num_wait_iterations,uint8_t num_match_iterations,uint8_t delay_per_iteration_us,uint8_t delay_before_iterations_us,uint32_t status_flags_1_set_all_wait,uint32_t status_flags_1_set_any_wait,uint32_t status_flags_1_clr_all_wait,uint32_t status_flags_1_clr_any_wait)1615 int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,
1616 uint8_t num_wait_iterations,
1617 uint8_t num_match_iterations,
1618 uint8_t delay_per_iteration_us,
1619 uint8_t delay_before_iterations_us,
1620 uint32_t status_flags_1_set_all_wait,
1621 uint32_t status_flags_1_set_any_wait,
1622 uint32_t status_flags_1_clr_all_wait,
1623 uint32_t status_flags_1_clr_any_wait)
1624 {
1625 struct ti_sci_msg_req_wait_proc_boot_status req;
1626 struct ti_sci_msg_hdr *hdr;
1627 struct k3_sec_proxy_msg tx_message;
1628 int ret;
1629
1630 /* Ensure we have sane transfer size */
1631 if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE)
1632 return -ERANGE;
1633
1634 hdr = (struct ti_sci_msg_hdr *)&req;
1635 hdr->seq = ++message_sequence;
1636 hdr->type = TISCI_MSG_WAIT_PROC_BOOT_STATUS;
1637 hdr->host = TI_SCI_HOST_ID;
1638 /* Setup with NORESPONSE flag to keep response queue clean */
1639 hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE;
1640
1641 req.processor_id = proc_id;
1642 req.num_wait_iterations = num_wait_iterations;
1643 req.num_match_iterations = num_match_iterations;
1644 req.delay_per_iteration_us = delay_per_iteration_us;
1645 req.delay_before_iterations_us = delay_before_iterations_us;
1646 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1647 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1648 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1649 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1650
1651 tx_message.buf = (uint8_t *)&req;
1652 tx_message.len = sizeof(req);
1653
1654 /* Send message */
1655 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message);
1656 if (ret) {
1657 ERROR("Message sending failed (%d)\n", ret);
1658 return ret;
1659 }
1660
1661 /* Return without waiting for response */
1662 return 0;
1663 }
1664
1665 /**
1666 * ti_sci_init() - Basic initialization
1667 *
1668 * Return: 0 if all goes well, else appropriate error message
1669 */
ti_sci_init(void)1670 int ti_sci_init(void)
1671 {
1672 struct ti_sci_msg_resp_version rev_info;
1673 int ret;
1674
1675 ret = ti_sci_get_revision(&rev_info);
1676 if (ret) {
1677 ERROR("Unable to communicate with control firmware (%d)\n", ret);
1678 return ret;
1679 }
1680
1681 INFO("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
1682 rev_info.abi_major, rev_info.abi_minor,
1683 rev_info.firmware_revision,
1684 rev_info.firmware_description);
1685
1686 return 0;
1687 }
1688