1 /*
2 * Texas Instruments System Control Interface Driver
3 * Based on Linux and U-Boot implementation
4 *
5 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
6 *
7 * SPDX-License-Identifier: BSD-3-Clause
8 */
9
10 #include <errno.h>
11 #include <stdbool.h>
12 #include <stddef.h>
13 #include <string.h>
14
15 #include <platform_def.h>
16
17 #include <common/debug.h>
18 #include <sec_proxy.h>
19
20 #include "ti_sci_protocol.h"
21 #include "ti_sci.h"
22
/*
 * Sequence number stamped into each outgoing request header (incremented in
 * ti_sci_setup_one_xfer()/ti_sci_device_put_no_wait()) and matched against
 * the response header in ti_sci_get_response().
 */
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
static uint8_t message_sequence;
27
28 /**
29 * struct ti_sci_xfer - Structure representing a message flow
30 * @tx_message: Transmit message
31 * @rx_message: Receive message
32 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;	/* request sent to firmware */
	struct k3_sec_proxy_msg rx_message;	/* buffer filled with the response */
};
37
38 /**
39 * ti_sci_setup_one_xfer() - Setup one message type
40 *
41 * @msg_type: Message type
42 * @msg_flags: Flag to set for the message
43 * @tx_buf: Buffer to be sent to mailbox channel
44 * @tx_message_size: transmit message size
45 * @rx_buf: Buffer to be received from mailbox channel
46 * @rx_message_size: receive message size
47 *
48 * Helper function which is used by various command functions that are
49 * exposed to clients of this driver for allocating a message traffic event.
50 *
51 * Return: 0 if all goes well, else appropriate error message
52 */
ti_sci_setup_one_xfer(uint16_t msg_type,uint32_t msg_flags,void * tx_buf,size_t tx_message_size,void * rx_buf,size_t rx_message_size,struct ti_sci_xfer * xfer)53 static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags,
54 void *tx_buf,
55 size_t tx_message_size,
56 void *rx_buf,
57 size_t rx_message_size,
58 struct ti_sci_xfer *xfer)
59 {
60 struct ti_sci_msg_hdr *hdr;
61
62 /* Ensure we have sane transfer sizes */
63 if (rx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
64 tx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
65 rx_message_size < sizeof(*hdr) ||
66 tx_message_size < sizeof(*hdr))
67 return -ERANGE;
68
69 hdr = (struct ti_sci_msg_hdr *)tx_buf;
70 hdr->seq = ++message_sequence;
71 hdr->type = msg_type;
72 hdr->host = TI_SCI_HOST_ID;
73 hdr->flags = msg_flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
74
75 xfer->tx_message.buf = tx_buf;
76 xfer->tx_message.len = tx_message_size;
77
78 xfer->rx_message.buf = rx_buf;
79 xfer->rx_message.len = rx_message_size;
80
81 return 0;
82 }
83
84 /**
85 * ti_sci_get_response() - Receive response from mailbox channel
86 *
87 * @xfer: Transfer to initiate and wait for response
88 * @chan: Channel to receive the response
89 *
90 * Return: 0 if all goes well, else appropriate error message
91 */
ti_sci_get_response(struct ti_sci_xfer * xfer,enum k3_sec_proxy_chan_id chan)92 static inline int ti_sci_get_response(struct ti_sci_xfer *xfer,
93 enum k3_sec_proxy_chan_id chan)
94 {
95 struct k3_sec_proxy_msg *msg = &xfer->rx_message;
96 struct ti_sci_msg_hdr *hdr;
97 unsigned int retry = 5;
98 int ret;
99
100 for (; retry > 0; retry--) {
101 /* Receive the response */
102 ret = k3_sec_proxy_recv(chan, msg);
103 if (ret) {
104 ERROR("Message receive failed (%d)\n", ret);
105 return ret;
106 }
107
108 /* msg is updated by Secure Proxy driver */
109 hdr = (struct ti_sci_msg_hdr *)msg->buf;
110
111 /* Sanity check for message response */
112 if (hdr->seq == message_sequence)
113 break;
114 else
115 WARN("Message with sequence ID %u is not expected\n", hdr->seq);
116 }
117 if (!retry) {
118 ERROR("Timed out waiting for message\n");
119 return -EINVAL;
120 }
121
122 if (msg->len > TI_SCI_MAX_MESSAGE_SIZE) {
123 ERROR("Unable to handle %lu xfer (max %d)\n",
124 msg->len, TI_SCI_MAX_MESSAGE_SIZE);
125 return -EINVAL;
126 }
127
128 if (!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
129 return -ENODEV;
130
131 return 0;
132 }
133
134 /**
135 * ti_sci_do_xfer() - Do one transfer
136 *
137 * @xfer: Transfer to initiate and wait for response
138 *
139 * Return: 0 if all goes well, else appropriate error message
140 */
ti_sci_do_xfer(struct ti_sci_xfer * xfer)141 static inline int ti_sci_do_xfer(struct ti_sci_xfer *xfer)
142 {
143 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
144 int ret;
145
146 /* Clear any spurious messages in receive queue */
147 ret = k3_sec_proxy_clear_rx_thread(SP_RESPONSE);
148 if (ret) {
149 ERROR("Could not clear response queue (%d)\n", ret);
150 return ret;
151 }
152
153 /* Send the message */
154 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, msg);
155 if (ret) {
156 ERROR("Message sending failed (%d)\n", ret);
157 return ret;
158 }
159
160 /* Get the response */
161 ret = ti_sci_get_response(xfer, SP_RESPONSE);
162 if (ret) {
163 ERROR("Failed to get response (%d)\n", ret);
164 return ret;
165 }
166
167 return 0;
168 }
169
170 /**
171 * ti_sci_get_revision() - Get the revision of the SCI entity
172 *
173 * Updates the SCI information in the internal data structure.
174 *
175 * Return: 0 if all goes well, else appropriate error message
176 */
ti_sci_get_revision(struct ti_sci_msg_resp_version * rev_info)177 int ti_sci_get_revision(struct ti_sci_msg_resp_version *rev_info)
178 {
179 struct ti_sci_msg_hdr hdr;
180 struct ti_sci_xfer xfer;
181 int ret;
182
183 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0,
184 &hdr, sizeof(hdr),
185 rev_info, sizeof(*rev_info),
186 &xfer);
187 if (ret) {
188 ERROR("Message alloc failed (%d)\n", ret);
189 return ret;
190 }
191
192 ret = ti_sci_do_xfer(&xfer);
193 if (ret) {
194 ERROR("Transfer send failed (%d)\n", ret);
195 return ret;
196 }
197
198 return 0;
199 }
200
201 /**
202 * ti_sci_device_set_state() - Set device state
203 *
204 * @id: Device identifier
205 * @flags: flags to setup for the device
206 * @state: State to move the device to
207 *
208 * Return: 0 if all goes well, else appropriate error message
209 */
ti_sci_device_set_state(uint32_t id,uint32_t flags,uint8_t state)210 static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state)
211 {
212 struct ti_sci_msg_req_set_device_state req;
213 struct ti_sci_msg_hdr resp;
214
215 struct ti_sci_xfer xfer;
216 int ret;
217
218 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, flags,
219 &req, sizeof(req),
220 &resp, sizeof(resp),
221 &xfer);
222 if (ret) {
223 ERROR("Message alloc failed (%d)\n", ret);
224 return ret;
225 }
226
227 req.id = id;
228 req.state = state;
229
230 ret = ti_sci_do_xfer(&xfer);
231 if (ret) {
232 ERROR("Transfer send failed (%d)\n", ret);
233 return ret;
234 }
235
236 return 0;
237 }
238
239 /**
240 * ti_sci_device_get_state() - Get device state
241 *
242 * @id: Device Identifier
243 * @clcnt: Pointer to Context Loss Count
244 * @resets: pointer to resets
245 * @p_state: pointer to p_state
246 * @c_state: pointer to c_state
247 *
248 * Return: 0 if all goes well, else appropriate error message
249 */
ti_sci_device_get_state(uint32_t id,uint32_t * clcnt,uint32_t * resets,uint8_t * p_state,uint8_t * c_state)250 static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt,
251 uint32_t *resets, uint8_t *p_state,
252 uint8_t *c_state)
253 {
254 struct ti_sci_msg_req_get_device_state req;
255 struct ti_sci_msg_resp_get_device_state resp;
256
257 struct ti_sci_xfer xfer;
258 int ret;
259
260 if (!clcnt && !resets && !p_state && !c_state)
261 return -EINVAL;
262
263 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0,
264 &req, sizeof(req),
265 &resp, sizeof(resp),
266 &xfer);
267 if (ret) {
268 ERROR("Message alloc failed (%d)\n", ret);
269 return ret;
270 }
271
272 req.id = id;
273
274 ret = ti_sci_do_xfer(&xfer);
275 if (ret) {
276 ERROR("Transfer send failed (%d)\n", ret);
277 return ret;
278 }
279
280 if (clcnt)
281 *clcnt = resp.context_loss_count;
282 if (resets)
283 *resets = resp.resets;
284 if (p_state)
285 *p_state = resp.programmed_state;
286 if (c_state)
287 *c_state = resp.current_state;
288
289 return 0;
290 }
291
292 /**
293 * ti_sci_device_get() - Request for device managed by TISCI
294 *
295 * @id: Device Identifier
296 *
297 * Request for the device - NOTE: the client MUST maintain integrity of
298 * usage count by balancing get_device with put_device. No refcounting is
299 * managed by driver for that purpose.
300 *
301 * Return: 0 if all goes well, else appropriate error message
302 */
ti_sci_device_get(uint32_t id)303 int ti_sci_device_get(uint32_t id)
304 {
305 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON);
306 }
307
308 /**
309 * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI
310 *
311 * @id: Device Identifier
312 *
313 * Request for the device - NOTE: the client MUST maintain integrity of
314 * usage count by balancing get_device with put_device. No refcounting is
315 * managed by driver for that purpose.
316 *
317 * NOTE: This _exclusive version of the get API is for exclusive access to the
318 * device. Any other host in the system will fail to get this device after this
319 * call until exclusive access is released with device_put or a non-exclusive
320 * set call.
321 *
322 * Return: 0 if all goes well, else appropriate error message
323 */
ti_sci_device_get_exclusive(uint32_t id)324 int ti_sci_device_get_exclusive(uint32_t id)
325 {
326 return ti_sci_device_set_state(id,
327 MSG_FLAG_DEVICE_EXCLUSIVE,
328 MSG_DEVICE_SW_STATE_ON);
329 }
330
331 /**
332 * ti_sci_device_idle() - Idle a device managed by TISCI
333 *
334 * @id: Device Identifier
335 *
336 * Request for the device - NOTE: the client MUST maintain integrity of
337 * usage count by balancing get_device with put_device. No refcounting is
338 * managed by driver for that purpose.
339 *
340 * Return: 0 if all goes well, else appropriate error message
341 */
ti_sci_device_idle(uint32_t id)342 int ti_sci_device_idle(uint32_t id)
343 {
344 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION);
345 }
346
347 /**
348 * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI
349 *
350 * @id: Device Identifier
351 *
352 * Request for the device - NOTE: the client MUST maintain integrity of
353 * usage count by balancing get_device with put_device. No refcounting is
354 * managed by driver for that purpose.
355 *
356 * NOTE: This _exclusive version of the idle API is for exclusive access to
357 * the device. Any other host in the system will fail to get this device after
358 * this call until exclusive access is released with device_put or a
359 * non-exclusive set call.
360 *
361 * Return: 0 if all goes well, else appropriate error message
362 */
ti_sci_device_idle_exclusive(uint32_t id)363 int ti_sci_device_idle_exclusive(uint32_t id)
364 {
365 return ti_sci_device_set_state(id,
366 MSG_FLAG_DEVICE_EXCLUSIVE,
367 MSG_DEVICE_SW_STATE_RETENTION);
368 }
369
370 /**
371 * ti_sci_device_put() - Release a device managed by TISCI
372 *
373 * @id: Device Identifier
374 *
375 * Request for the device - NOTE: the client MUST maintain integrity of
376 * usage count by balancing get_device with put_device. No refcounting is
377 * managed by driver for that purpose.
378 *
379 * Return: 0 if all goes well, else appropriate error message
380 */
ti_sci_device_put(uint32_t id)381 int ti_sci_device_put(uint32_t id)
382 {
383 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
384 }
385
386 /**
387 * ti_sci_device_put_no_wait() - Release a device without requesting or waiting
388 * for a response.
389 *
390 * @id: Device Identifier
391 *
392 * Request for the device - NOTE: the client MUST maintain integrity of
393 * usage count by balancing get_device with put_device. No refcounting is
394 * managed by driver for that purpose.
395 *
396 * Return: 0 if all goes well, else appropriate error message
397 */
ti_sci_device_put_no_wait(uint32_t id)398 int ti_sci_device_put_no_wait(uint32_t id)
399 {
400 struct ti_sci_msg_req_set_device_state req;
401 struct ti_sci_msg_hdr *hdr;
402 struct k3_sec_proxy_msg tx_message;
403 int ret;
404
405 /* Ensure we have sane transfer size */
406 if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE)
407 return -ERANGE;
408
409 hdr = (struct ti_sci_msg_hdr *)&req;
410 hdr->seq = ++message_sequence;
411 hdr->type = TI_SCI_MSG_SET_DEVICE_STATE;
412 hdr->host = TI_SCI_HOST_ID;
413 /* Setup with NORESPONSE flag to keep response queue clean */
414 hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE;
415
416 req.id = id;
417 req.state = MSG_DEVICE_SW_STATE_AUTO_OFF;
418
419 tx_message.buf = (uint8_t *)&req;
420 tx_message.len = sizeof(req);
421
422 /* Send message */
423 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message);
424 if (ret) {
425 ERROR("Message sending failed (%d)\n", ret);
426 return ret;
427 }
428
429 /* Return without waiting for response */
430 return 0;
431 }
432
433 /**
434 * ti_sci_device_is_valid() - Is the device valid
435 *
436 * @id: Device Identifier
437 *
438 * Return: 0 if all goes well and the device ID is valid, else return
439 * appropriate error
440 */
int ti_sci_device_is_valid(uint32_t id)
{
	uint8_t current;

	/* An invalid device ID makes the state query itself fail */
	return ti_sci_device_get_state(id, NULL, NULL, NULL, &current);
}
448
449 /**
450 * ti_sci_device_get_clcnt() - Get context loss counter
451 *
452 * @id: Device Identifier
453 * @count: Pointer to Context Loss counter to populate
454 *
455 * Return: 0 if all goes well, else appropriate error message
456 */
int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count)
{
	/* Only the context-loss counter is of interest here */
	return ti_sci_device_get_state(id, count, NULL, NULL, NULL);
}
461
462 /**
463 * ti_sci_device_is_idle() - Check if the device is requested to be idle
464 *
465 * @id: Device Identifier
466 * @r_state: true if requested to be idle
467 *
468 * Return: 0 if all goes well, else appropriate error message
469 */
ti_sci_device_is_idle(uint32_t id,bool * r_state)470 int ti_sci_device_is_idle(uint32_t id, bool *r_state)
471 {
472 int ret;
473 uint8_t state;
474
475 if (!r_state)
476 return -EINVAL;
477
478 ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL);
479 if (ret)
480 return ret;
481
482 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
483
484 return 0;
485 }
486
487 /**
488 * ti_sci_device_is_stop() - Check if the device is requested to be stopped
489 *
490 * @id: Device Identifier
491 * @r_state: true if requested to be stopped
492 * @curr_state: true if currently stopped
493 *
494 * Return: 0 if all goes well, else appropriate error message
495 */
ti_sci_device_is_stop(uint32_t id,bool * r_state,bool * curr_state)496 int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state)
497 {
498 int ret;
499 uint8_t p_state, c_state;
500
501 if (!r_state && !curr_state)
502 return -EINVAL;
503
504 ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
505 if (ret)
506 return ret;
507
508 if (r_state)
509 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
510 if (curr_state)
511 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
512
513 return 0;
514 }
515
516 /**
517 * ti_sci_device_is_on() - Check if the device is requested to be ON
518 *
519 * @id: Device Identifier
520 * @r_state: true if requested to be ON
521 * @curr_state: true if currently ON and active
522 *
523 * Return: 0 if all goes well, else appropriate error message
524 */
ti_sci_device_is_on(uint32_t id,bool * r_state,bool * curr_state)525 int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state)
526 {
527 int ret;
528 uint8_t p_state, c_state;
529
530 if (!r_state && !curr_state)
531 return -EINVAL;
532
533 ret =
534 ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
535 if (ret)
536 return ret;
537
538 if (r_state)
539 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
540 if (curr_state)
541 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
542
543 return 0;
544 }
545
546 /**
547 * ti_sci_device_is_trans() - Check if the device is currently transitioning
548 *
549 * @id: Device Identifier
550 * @curr_state: true if currently transitioning
551 *
552 * Return: 0 if all goes well, else appropriate error message
553 */
ti_sci_device_is_trans(uint32_t id,bool * curr_state)554 int ti_sci_device_is_trans(uint32_t id, bool *curr_state)
555 {
556 int ret;
557 uint8_t state;
558
559 if (!curr_state)
560 return -EINVAL;
561
562 ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state);
563 if (ret)
564 return ret;
565
566 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
567
568 return 0;
569 }
570
571 /**
572 * ti_sci_device_set_resets() - Set resets for device managed by TISCI
573 *
574 * @id: Device Identifier
575 * @reset_state: Device specific reset bit field
576 *
577 * Return: 0 if all goes well, else appropriate error message
578 */
ti_sci_device_set_resets(uint32_t id,uint32_t reset_state)579 int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state)
580 {
581 struct ti_sci_msg_req_set_device_resets req;
582 struct ti_sci_msg_hdr resp;
583
584 struct ti_sci_xfer xfer;
585 int ret;
586
587 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS, 0,
588 &req, sizeof(req),
589 &resp, sizeof(resp),
590 &xfer);
591 if (ret) {
592 ERROR("Message alloc failed (%d)\n", ret);
593 return ret;
594 }
595
596 req.id = id;
597 req.resets = reset_state;
598
599 ret = ti_sci_do_xfer(&xfer);
600 if (ret) {
601 ERROR("Transfer send failed (%d)\n", ret);
602 return ret;
603 }
604
605 return 0;
606 }
607
608 /**
609 * ti_sci_device_get_resets() - Get reset state for device managed by TISCI
610 *
611 * @id: Device Identifier
612 * @reset_state: Pointer to reset state to populate
613 *
614 * Return: 0 if all goes well, else appropriate error message
615 */
int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state)
{
	/* Only the reset bitfield is of interest here */
	return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL);
}
620
621 /**
622 * ti_sci_clock_set_state() - Set clock state helper
623 *
624 * @dev_id: Device identifier this request is for
625 * @clk_id: Clock identifier for the device for this request,
626 * Each device has its own set of clock inputs, This indexes
627 * which clock input to modify
628 * @flags: Header flags as needed
629 * @state: State to request for the clock
630 *
631 * Return: 0 if all goes well, else appropriate error message
632 */
ti_sci_clock_set_state(uint32_t dev_id,uint8_t clk_id,uint32_t flags,uint8_t state)633 int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id,
634 uint32_t flags, uint8_t state)
635 {
636 struct ti_sci_msg_req_set_clock_state req;
637 struct ti_sci_msg_hdr resp;
638
639 struct ti_sci_xfer xfer;
640 int ret;
641
642 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE, flags,
643 &req, sizeof(req),
644 &resp, sizeof(resp),
645 &xfer);
646 if (ret) {
647 ERROR("Message alloc failed (%d)\n", ret);
648 return ret;
649 }
650
651 req.dev_id = dev_id;
652 req.clk_id = clk_id;
653 req.request_state = state;
654
655 ret = ti_sci_do_xfer(&xfer);
656 if (ret) {
657 ERROR("Transfer send failed (%d)\n", ret);
658 return ret;
659 }
660
661 return 0;
662 }
663
664 /**
665 * ti_sci_clock_get_state() - Get clock state helper
666 *
667 * @dev_id: Device identifier this request is for
668 * @clk_id: Clock identifier for the device for this request.
669 * Each device has its own set of clock inputs. This indexes
670 * which clock input to modify.
671 * @programmed_state: State requested for clock to move to
672 * @current_state: State that the clock is currently in
673 *
674 * Return: 0 if all goes well, else appropriate error message
675 */
ti_sci_clock_get_state(uint32_t dev_id,uint8_t clk_id,uint8_t * programmed_state,uint8_t * current_state)676 int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id,
677 uint8_t *programmed_state,
678 uint8_t *current_state)
679 {
680 struct ti_sci_msg_req_get_clock_state req;
681 struct ti_sci_msg_resp_get_clock_state resp;
682
683 struct ti_sci_xfer xfer;
684 int ret;
685
686 if (!programmed_state && !current_state)
687 return -EINVAL;
688
689 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE, 0,
690 &req, sizeof(req),
691 &resp, sizeof(resp),
692 &xfer);
693 if (ret) {
694 ERROR("Message alloc failed (%d)\n", ret);
695 return ret;
696 }
697
698 req.dev_id = dev_id;
699 req.clk_id = clk_id;
700
701 ret = ti_sci_do_xfer(&xfer);
702 if (ret) {
703 ERROR("Transfer send failed (%d)\n", ret);
704 return ret;
705 }
706
707 if (programmed_state)
708 *programmed_state = resp.programmed_state;
709 if (current_state)
710 *current_state = resp.current_state;
711
712 return 0;
713 }
714
715 /**
716 * ti_sci_clock_get() - Get control of a clock from TI SCI
717
718 * @dev_id: Device identifier this request is for
719 * @clk_id: Clock identifier for the device for this request.
720 * Each device has its own set of clock inputs. This indexes
721 * which clock input to modify.
722 * @needs_ssc: 'true' iff Spread Spectrum clock is desired
723 * @can_change_freq: 'true' iff frequency change is desired
724 * @enable_input_term: 'true' iff input termination is desired
725 *
726 * Return: 0 if all goes well, else appropriate error message
727 */
ti_sci_clock_get(uint32_t dev_id,uint8_t clk_id,bool needs_ssc,bool can_change_freq,bool enable_input_term)728 int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id,
729 bool needs_ssc, bool can_change_freq,
730 bool enable_input_term)
731 {
732 uint32_t flags = 0;
733
734 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
735 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
736 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
737
738 return ti_sci_clock_set_state(dev_id, clk_id, flags,
739 MSG_CLOCK_SW_STATE_REQ);
740 }
741
742 /**
743 * ti_sci_clock_idle() - Idle a clock which is in our control
744
745 * @dev_id: Device identifier this request is for
746 * @clk_id: Clock identifier for the device for this request.
747 * Each device has its own set of clock inputs. This indexes
748 * which clock input to modify.
749 *
750 * NOTE: This clock must have been requested by get_clock previously.
751 *
752 * Return: 0 if all goes well, else appropriate error message
753 */
ti_sci_clock_idle(uint32_t dev_id,uint8_t clk_id)754 int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id)
755 {
756 return ti_sci_clock_set_state(dev_id, clk_id, 0,
757 MSG_CLOCK_SW_STATE_UNREQ);
758 }
759
760 /**
761 * ti_sci_clock_put() - Release a clock from our control
762 *
763 * @dev_id: Device identifier this request is for
764 * @clk_id: Clock identifier for the device for this request.
765 * Each device has its own set of clock inputs. This indexes
766 * which clock input to modify.
767 *
768 * NOTE: This clock must have been requested by get_clock previously.
769 *
770 * Return: 0 if all goes well, else appropriate error message
771 */
ti_sci_clock_put(uint32_t dev_id,uint8_t clk_id)772 int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id)
773 {
774 return ti_sci_clock_set_state(dev_id, clk_id, 0,
775 MSG_CLOCK_SW_STATE_AUTO);
776 }
777
778 /**
779 * ti_sci_clock_is_auto() - Is the clock being auto managed
780 *
781 * @dev_id: Device identifier this request is for
782 * @clk_id: Clock identifier for the device for this request.
783 * Each device has its own set of clock inputs. This indexes
784 * which clock input to modify.
785 * @req_state: state indicating if the clock is auto managed
786 *
787 * Return: 0 if all goes well, else appropriate error message
788 */
ti_sci_clock_is_auto(uint32_t dev_id,uint8_t clk_id,bool * req_state)789 int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state)
790 {
791 uint8_t state = 0;
792 int ret;
793
794 if (!req_state)
795 return -EINVAL;
796
797 ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL);
798 if (ret)
799 return ret;
800
801 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
802
803 return 0;
804 }
805
806 /**
807 * ti_sci_clock_is_on() - Is the clock ON
808 *
809 * @dev_id: Device identifier this request is for
810 * @clk_id: Clock identifier for the device for this request.
811 * Each device has its own set of clock inputs. This indexes
812 * which clock input to modify.
813 * @req_state: state indicating if the clock is managed by us and enabled
814 * @curr_state: state indicating if the clock is ready for operation
815 *
816 * Return: 0 if all goes well, else appropriate error message
817 */
ti_sci_clock_is_on(uint32_t dev_id,uint8_t clk_id,bool * req_state,bool * curr_state)818 int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id,
819 bool *req_state, bool *curr_state)
820 {
821 uint8_t c_state = 0, r_state = 0;
822 int ret;
823
824 if (!req_state && !curr_state)
825 return -EINVAL;
826
827 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
828 if (ret)
829 return ret;
830
831 if (req_state)
832 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
833 if (curr_state)
834 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
835
836 return 0;
837 }
838
839 /**
840 * ti_sci_clock_is_off() - Is the clock OFF
841 *
842 * @dev_id: Device identifier this request is for
843 * @clk_id: Clock identifier for the device for this request.
844 * Each device has its own set of clock inputs. This indexes
845 * which clock input to modify.
846 * @req_state: state indicating if the clock is managed by us and disabled
847 * @curr_state: state indicating if the clock is NOT ready for operation
848 *
849 * Return: 0 if all goes well, else appropriate error message
850 */
ti_sci_clock_is_off(uint32_t dev_id,uint8_t clk_id,bool * req_state,bool * curr_state)851 int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id,
852 bool *req_state, bool *curr_state)
853 {
854 uint8_t c_state = 0, r_state = 0;
855 int ret;
856
857 if (!req_state && !curr_state)
858 return -EINVAL;
859
860 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
861 if (ret)
862 return ret;
863
864 if (req_state)
865 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
866 if (curr_state)
867 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
868
869 return 0;
870 }
871
872 /**
873 * ti_sci_clock_set_parent() - Set the clock source of a specific device clock
874 *
875 * @dev_id: Device identifier this request is for
876 * @clk_id: Clock identifier for the device for this request.
877 * Each device has its own set of clock inputs. This indexes
878 * which clock input to modify.
879 * @parent_id: Parent clock identifier to set
880 *
881 * Return: 0 if all goes well, else appropriate error message
882 */
ti_sci_clock_set_parent(uint32_t dev_id,uint8_t clk_id,uint8_t parent_id)883 int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id)
884 {
885 struct ti_sci_msg_req_set_clock_parent req;
886 struct ti_sci_msg_hdr resp;
887
888 struct ti_sci_xfer xfer;
889 int ret;
890
891 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT, 0,
892 &req, sizeof(req),
893 &resp, sizeof(resp),
894 &xfer);
895 if (ret) {
896 ERROR("Message alloc failed (%d)\n", ret);
897 return ret;
898 }
899
900 req.dev_id = dev_id;
901 req.clk_id = clk_id;
902 req.parent_id = parent_id;
903
904 ret = ti_sci_do_xfer(&xfer);
905 if (ret) {
906 ERROR("Transfer send failed (%d)\n", ret);
907 return ret;
908 }
909
910 return 0;
911 }
912
913 /**
914 * ti_sci_clock_get_parent() - Get current parent clock source
915 *
916 * @dev_id: Device identifier this request is for
917 * @clk_id: Clock identifier for the device for this request.
918 * Each device has its own set of clock inputs. This indexes
919 * which clock input to modify.
920 * @parent_id: Current clock parent
921 *
922 * Return: 0 if all goes well, else appropriate error message
923 */
ti_sci_clock_get_parent(uint32_t dev_id,uint8_t clk_id,uint8_t * parent_id)924 int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id)
925 {
926 struct ti_sci_msg_req_get_clock_parent req;
927 struct ti_sci_msg_resp_get_clock_parent resp;
928
929 struct ti_sci_xfer xfer;
930 int ret;
931
932 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT, 0,
933 &req, sizeof(req),
934 &resp, sizeof(resp),
935 &xfer);
936 if (ret) {
937 ERROR("Message alloc failed (%d)\n", ret);
938 return ret;
939 }
940
941 req.dev_id = dev_id;
942 req.clk_id = clk_id;
943
944 ret = ti_sci_do_xfer(&xfer);
945 if (ret) {
946 ERROR("Transfer send failed (%d)\n", ret);
947 return ret;
948 }
949
950 *parent_id = resp.parent_id;
951
952 return 0;
953 }
954
955 /**
956 * ti_sci_clock_get_num_parents() - Get num parents of the current clk source
957 *
958 * @dev_id: Device identifier this request is for
959 * @clk_id: Clock identifier for the device for this request.
960 * Each device has its own set of clock inputs. This indexes
961 * which clock input to modify.
 * @num_parents:	Returns the number of parents to the current clock.
963 *
964 * Return: 0 if all goes well, else appropriate error message
965 */
ti_sci_clock_get_num_parents(uint32_t dev_id,uint8_t clk_id,uint8_t * num_parents)966 int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id,
967 uint8_t *num_parents)
968 {
969 struct ti_sci_msg_req_get_clock_num_parents req;
970 struct ti_sci_msg_resp_get_clock_num_parents resp;
971
972 struct ti_sci_xfer xfer;
973 int ret;
974
975 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 0,
976 &req, sizeof(req),
977 &resp, sizeof(resp),
978 &xfer);
979 if (ret) {
980 ERROR("Message alloc failed (%d)\n", ret);
981 return ret;
982 }
983
984 req.dev_id = dev_id;
985 req.clk_id = clk_id;
986
987 ret = ti_sci_do_xfer(&xfer);
988 if (ret) {
989 ERROR("Transfer send failed (%d)\n", ret);
990 return ret;
991 }
992
993 *num_parents = resp.num_parents;
994
995 return 0;
996 }
997
998 /**
999 * ti_sci_clock_get_match_freq() - Find a good match for frequency
1000 *
1001 * @dev_id: Device identifier this request is for
1002 * @clk_id: Clock identifier for the device for this request.
1003 * Each device has its own set of clock inputs. This indexes
1004 * which clock input to modify.
1005 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1006 * allowable programmed frequency and does not account for clock
1007 * tolerances and jitter.
1008 * @target_freq: The target clock frequency in Hz. A frequency will be
1009 * processed as close to this target frequency as possible.
1010 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1011 * allowable programmed frequency and does not account for clock
1012 * tolerances and jitter.
1013 * @match_freq: Frequency match in Hz response.
1014 *
1015 * Return: 0 if all goes well, else appropriate error message
1016 */
ti_sci_clock_get_match_freq(uint32_t dev_id,uint8_t clk_id,uint64_t min_freq,uint64_t target_freq,uint64_t max_freq,uint64_t * match_freq)1017 int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id,
1018 uint64_t min_freq, uint64_t target_freq,
1019 uint64_t max_freq, uint64_t *match_freq)
1020 {
1021 struct ti_sci_msg_req_query_clock_freq req;
1022 struct ti_sci_msg_resp_query_clock_freq resp;
1023
1024 struct ti_sci_xfer xfer;
1025 int ret;
1026
1027 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ, 0,
1028 &req, sizeof(req),
1029 &resp, sizeof(resp),
1030 &xfer);
1031 if (ret) {
1032 ERROR("Message alloc failed (%d)\n", ret);
1033 return ret;
1034 }
1035
1036 req.dev_id = dev_id;
1037 req.clk_id = clk_id;
1038 req.min_freq_hz = min_freq;
1039 req.target_freq_hz = target_freq;
1040 req.max_freq_hz = max_freq;
1041
1042 ret = ti_sci_do_xfer(&xfer);
1043 if (ret) {
1044 ERROR("Transfer send failed (%d)\n", ret);
1045 return ret;
1046 }
1047
1048 *match_freq = resp.freq_hz;
1049
1050 return 0;
1051 }
1052
1053 /**
1054 * ti_sci_clock_set_freq() - Set a frequency for clock
1055 *
1056 * @dev_id: Device identifier this request is for
1057 * @clk_id: Clock identifier for the device for this request.
1058 * Each device has its own set of clock inputs. This indexes
1059 * which clock input to modify.
1060 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1061 * allowable programmed frequency and does not account for clock
1062 * tolerances and jitter.
1063 * @target_freq: The target clock frequency in Hz. A frequency will be
1064 * processed as close to this target frequency as possible.
1065 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1066 * allowable programmed frequency and does not account for clock
1067 * tolerances and jitter.
1068 *
1069 * Return: 0 if all goes well, else appropriate error message
1070 */
ti_sci_clock_set_freq(uint32_t dev_id,uint8_t clk_id,uint64_t min_freq,uint64_t target_freq,uint64_t max_freq)1071 int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq,
1072 uint64_t target_freq, uint64_t max_freq)
1073 {
1074 struct ti_sci_msg_req_set_clock_freq req;
1075 struct ti_sci_msg_hdr resp;
1076
1077 struct ti_sci_xfer xfer;
1078 int ret;
1079
1080 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ, 0,
1081 &req, sizeof(req),
1082 &resp, sizeof(resp),
1083 &xfer);
1084 if (ret) {
1085 ERROR("Message alloc failed (%d)\n", ret);
1086 return ret;
1087 }
1088 req.dev_id = dev_id;
1089 req.clk_id = clk_id;
1090 req.min_freq_hz = min_freq;
1091 req.target_freq_hz = target_freq;
1092 req.max_freq_hz = max_freq;
1093
1094 ret = ti_sci_do_xfer(&xfer);
1095 if (ret) {
1096 ERROR("Transfer send failed (%d)\n", ret);
1097 return ret;
1098 }
1099
1100 return 0;
1101 }
1102
1103 /**
1104 * ti_sci_clock_get_freq() - Get current frequency
1105 *
1106 * @dev_id: Device identifier this request is for
1107 * @clk_id: Clock identifier for the device for this request.
1108 * Each device has its own set of clock inputs. This indexes
1109 * which clock input to modify.
1110 * @freq: Currently frequency in Hz
1111 *
1112 * Return: 0 if all goes well, else appropriate error message
1113 */
ti_sci_clock_get_freq(uint32_t dev_id,uint8_t clk_id,uint64_t * freq)1114 int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq)
1115 {
1116 struct ti_sci_msg_req_get_clock_freq req;
1117 struct ti_sci_msg_resp_get_clock_freq resp;
1118
1119 struct ti_sci_xfer xfer;
1120 int ret;
1121
1122 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ, 0,
1123 &req, sizeof(req),
1124 &resp, sizeof(resp),
1125 &xfer);
1126 if (ret) {
1127 ERROR("Message alloc failed (%d)\n", ret);
1128 return ret;
1129 }
1130
1131 req.dev_id = dev_id;
1132 req.clk_id = clk_id;
1133
1134 ret = ti_sci_do_xfer(&xfer);
1135 if (ret) {
1136 ERROR("Transfer send failed (%d)\n", ret);
1137 return ret;
1138 }
1139
1140 *freq = resp.freq_hz;
1141
1142 return 0;
1143 }
1144
1145 /**
1146 * ti_sci_core_reboot() - Command to request system reset
1147 *
1148 * Return: 0 if all goes well, else appropriate error message
1149 */
ti_sci_core_reboot(void)1150 int ti_sci_core_reboot(void)
1151 {
1152 struct ti_sci_msg_req_reboot req;
1153 struct ti_sci_msg_hdr resp;
1154
1155 struct ti_sci_xfer xfer;
1156 int ret;
1157
1158 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET, 0,
1159 &req, sizeof(req),
1160 &resp, sizeof(resp),
1161 &xfer);
1162 if (ret) {
1163 ERROR("Message alloc failed (%d)\n", ret);
1164 return ret;
1165 }
1166 req.domain = TI_SCI_DOMAIN_FULL_SOC_RESET;
1167
1168 ret = ti_sci_do_xfer(&xfer);
1169 if (ret) {
1170 ERROR("Transfer send failed (%d)\n", ret);
1171 return ret;
1172 }
1173
1174 return 0;
1175 }
1176
1177 /**
1178 * ti_sci_proc_request() - Request a physical processor control
1179 *
1180 * @proc_id: Processor ID this request is for
1181 *
1182 * Return: 0 if all goes well, else appropriate error message
1183 */
ti_sci_proc_request(uint8_t proc_id)1184 int ti_sci_proc_request(uint8_t proc_id)
1185 {
1186 struct ti_sci_msg_req_proc_request req;
1187 struct ti_sci_msg_hdr resp;
1188
1189 struct ti_sci_xfer xfer;
1190 int ret;
1191
1192 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST, 0,
1193 &req, sizeof(req),
1194 &resp, sizeof(resp),
1195 &xfer);
1196 if (ret) {
1197 ERROR("Message alloc failed (%d)\n", ret);
1198 return ret;
1199 }
1200
1201 req.processor_id = proc_id;
1202
1203 ret = ti_sci_do_xfer(&xfer);
1204 if (ret) {
1205 ERROR("Transfer send failed (%d)\n", ret);
1206 return ret;
1207 }
1208
1209 return 0;
1210 }
1211
1212 /**
1213 * ti_sci_proc_release() - Release a physical processor control
1214 *
1215 * @proc_id: Processor ID this request is for
1216 *
1217 * Return: 0 if all goes well, else appropriate error message
1218 */
ti_sci_proc_release(uint8_t proc_id)1219 int ti_sci_proc_release(uint8_t proc_id)
1220 {
1221 struct ti_sci_msg_req_proc_release req;
1222 struct ti_sci_msg_hdr resp;
1223
1224 struct ti_sci_xfer xfer;
1225 int ret;
1226
1227 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE, 0,
1228 &req, sizeof(req),
1229 &resp, sizeof(resp),
1230 &xfer);
1231 if (ret) {
1232 ERROR("Message alloc failed (%d)\n", ret);
1233 return ret;
1234 }
1235
1236 req.processor_id = proc_id;
1237
1238 ret = ti_sci_do_xfer(&xfer);
1239 if (ret) {
1240 ERROR("Transfer send failed (%d)\n", ret);
1241 return ret;
1242 }
1243
1244 return 0;
1245 }
1246
1247 /**
1248 * ti_sci_proc_handover() - Handover a physical processor control to a host in
1249 * the processor's access control list.
1250 *
1251 * @proc_id: Processor ID this request is for
1252 * @host_id: Host ID to get the control of the processor
1253 *
1254 * Return: 0 if all goes well, else appropriate error message
1255 */
ti_sci_proc_handover(uint8_t proc_id,uint8_t host_id)1256 int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id)
1257 {
1258 struct ti_sci_msg_req_proc_handover req;
1259 struct ti_sci_msg_hdr resp;
1260
1261 struct ti_sci_xfer xfer;
1262 int ret;
1263
1264 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER, 0,
1265 &req, sizeof(req),
1266 &resp, sizeof(resp),
1267 &xfer);
1268 if (ret) {
1269 ERROR("Message alloc failed (%d)\n", ret);
1270 return ret;
1271 }
1272
1273 req.processor_id = proc_id;
1274 req.host_id = host_id;
1275
1276 ret = ti_sci_do_xfer(&xfer);
1277 if (ret) {
1278 ERROR("Transfer send failed (%d)\n", ret);
1279 return ret;
1280 }
1281
1282 return 0;
1283 }
1284
1285 /**
1286 * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags
1287 *
1288 * @proc_id: Processor ID this request is for
1289 * @config_flags_set: Configuration flags to be set
1290 * @config_flags_clear: Configuration flags to be cleared
1291 *
1292 * Return: 0 if all goes well, else appropriate error message
1293 */
ti_sci_proc_set_boot_cfg(uint8_t proc_id,uint64_t bootvector,uint32_t config_flags_set,uint32_t config_flags_clear)1294 int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector,
1295 uint32_t config_flags_set,
1296 uint32_t config_flags_clear)
1297 {
1298 struct ti_sci_msg_req_set_proc_boot_config req;
1299 struct ti_sci_msg_hdr resp;
1300
1301 struct ti_sci_xfer xfer;
1302 int ret;
1303
1304 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG, 0,
1305 &req, sizeof(req),
1306 &resp, sizeof(resp),
1307 &xfer);
1308 if (ret) {
1309 ERROR("Message alloc failed (%d)\n", ret);
1310 return ret;
1311 }
1312
1313 req.processor_id = proc_id;
1314 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1315 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1316 TISCI_ADDR_HIGH_SHIFT;
1317 req.config_flags_set = config_flags_set;
1318 req.config_flags_clear = config_flags_clear;
1319
1320 ret = ti_sci_do_xfer(&xfer);
1321 if (ret) {
1322 ERROR("Transfer send failed (%d)\n", ret);
1323 return ret;
1324 }
1325
1326 return 0;
1327 }
1328
1329 /**
1330 * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags
1331 *
1332 * @proc_id: Processor ID this request is for
1333 * @control_flags_set: Control flags to be set
1334 * @control_flags_clear: Control flags to be cleared
1335 *
1336 * Return: 0 if all goes well, else appropriate error message
1337 */
ti_sci_proc_set_boot_ctrl(uint8_t proc_id,uint32_t control_flags_set,uint32_t control_flags_clear)1338 int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set,
1339 uint32_t control_flags_clear)
1340 {
1341 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1342 struct ti_sci_msg_hdr resp;
1343
1344 struct ti_sci_xfer xfer;
1345 int ret;
1346
1347 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1348 &req, sizeof(req),
1349 &resp, sizeof(resp),
1350 &xfer);
1351 if (ret) {
1352 ERROR("Message alloc failed (%d)\n", ret);
1353 return ret;
1354 }
1355
1356 req.processor_id = proc_id;
1357 req.control_flags_set = control_flags_set;
1358 req.control_flags_clear = control_flags_clear;
1359
1360 ret = ti_sci_do_xfer(&xfer);
1361 if (ret) {
1362 ERROR("Transfer send failed (%d)\n", ret);
1363 return ret;
1364 }
1365
1366 return 0;
1367 }
1368
1369 /**
1370 * ti_sci_proc_set_boot_ctrl_no_wait() - Set the processor boot control flags
1371 * without requesting or waiting for a
1372 * response.
1373 *
1374 * @proc_id: Processor ID this request is for
1375 * @control_flags_set: Control flags to be set
1376 * @control_flags_clear: Control flags to be cleared
1377 *
1378 * Return: 0 if all goes well, else appropriate error message
1379 */
ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,uint32_t control_flags_set,uint32_t control_flags_clear)1380 int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,
1381 uint32_t control_flags_set,
1382 uint32_t control_flags_clear)
1383 {
1384 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1385 struct ti_sci_msg_hdr *hdr;
1386 struct k3_sec_proxy_msg tx_message;
1387 int ret;
1388
1389 /* Ensure we have sane transfer size */
1390 if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE)
1391 return -ERANGE;
1392
1393 hdr = (struct ti_sci_msg_hdr *)&req;
1394 hdr->seq = ++message_sequence;
1395 hdr->type = TISCI_MSG_SET_PROC_BOOT_CTRL;
1396 hdr->host = TI_SCI_HOST_ID;
1397 /* Setup with NORESPONSE flag to keep response queue clean */
1398 hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE;
1399
1400 req.processor_id = proc_id;
1401 req.control_flags_set = control_flags_set;
1402 req.control_flags_clear = control_flags_clear;
1403
1404 tx_message.buf = (uint8_t *)&req;
1405 tx_message.len = sizeof(req);
1406
1407 /* Send message */
1408 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message);
1409 if (ret) {
1410 ERROR("Message sending failed (%d)\n", ret);
1411 return ret;
1412 }
1413
1414 /* Return without waiting for response */
1415 return 0;
1416 }
1417
/**
 * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the
 *				   processor configuration flags
 *
 * @proc_id:	Processor ID this request is for
 * @cert_addr:	Memory address at which payload image certificate is located
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr)
{
	struct ti_sci_msg_req_proc_auth_boot_image req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	/*
	 * NOTE(review): "IMIAGE" appears to be a typo carried over from the
	 * macro's declaration in ti_sci_protocol.h — confirm against that
	 * header before renaming; do not "fix" only this usage.
	 */
	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMIAGE, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	/* Split the 64-bit certificate address into two 32-bit fields */
	req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
	req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
			     TISCI_ADDR_HIGH_SHIFT;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}
1457
1458 /**
1459 * ti_sci_proc_get_boot_status() - Get the processor boot status
1460 *
1461 * @proc_id: Processor ID this request is for
1462 *
1463 * Return: 0 if all goes well, else appropriate error message
1464 */
ti_sci_proc_get_boot_status(uint8_t proc_id,uint64_t * bv,uint32_t * cfg_flags,uint32_t * ctrl_flags,uint32_t * sts_flags)1465 int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv,
1466 uint32_t *cfg_flags,
1467 uint32_t *ctrl_flags,
1468 uint32_t *sts_flags)
1469 {
1470 struct ti_sci_msg_req_get_proc_boot_status req;
1471 struct ti_sci_msg_resp_get_proc_boot_status resp;
1472
1473 struct ti_sci_xfer xfer;
1474 int ret;
1475
1476 ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS, 0,
1477 &req, sizeof(req),
1478 &resp, sizeof(resp),
1479 &xfer);
1480 if (ret) {
1481 ERROR("Message alloc failed (%d)\n", ret);
1482 return ret;
1483 }
1484
1485 req.processor_id = proc_id;
1486
1487 ret = ti_sci_do_xfer(&xfer);
1488 if (ret) {
1489 ERROR("Transfer send failed (%d)\n", ret);
1490 return ret;
1491 }
1492
1493 *bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
1494 (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
1495 TISCI_ADDR_HIGH_MASK);
1496 *cfg_flags = resp.config_flags;
1497 *ctrl_flags = resp.control_flags;
1498 *sts_flags = resp.status_flags;
1499
1500 return 0;
1501 }
1502
1503 /**
1504 * ti_sci_proc_wait_boot_status() - Wait for a processor boot status
1505 *
1506 * @proc_id: Processor ID this request is for
1507 * @num_wait_iterations Total number of iterations we will check before
1508 * we will timeout and give up
1509 * @num_match_iterations How many iterations should we have continued
1510 * status to account for status bits glitching.
1511 * This is to make sure that match occurs for
1512 * consecutive checks. This implies that the
1513 * worst case should consider that the stable
1514 * time should at the worst be num_wait_iterations
1515 * num_match_iterations to prevent timeout.
1516 * @delay_per_iteration_us Specifies how long to wait (in micro seconds)
1517 * between each status checks. This is the minimum
1518 * duration, and overhead of register reads and
1519 * checks are on top of this and can vary based on
1520 * varied conditions.
1521 * @delay_before_iterations_us Specifies how long to wait (in micro seconds)
1522 * before the very first check in the first
1523 * iteration of status check loop. This is the
1524 * minimum duration, and overhead of register
1525 * reads and checks are.
1526 * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the
1527 * status matching this field requested MUST be 1.
1528 * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the
1529 * bits matching this field requested MUST be 1.
1530 * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the
1531 * status matching this field requested MUST be 0.
1532 * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the
1533 * bits matching this field requested MUST be 0.
1534 *
1535 * Return: 0 if all goes well, else appropriate error message
1536 */
ti_sci_proc_wait_boot_status(uint8_t proc_id,uint8_t num_wait_iterations,uint8_t num_match_iterations,uint8_t delay_per_iteration_us,uint8_t delay_before_iterations_us,uint32_t status_flags_1_set_all_wait,uint32_t status_flags_1_set_any_wait,uint32_t status_flags_1_clr_all_wait,uint32_t status_flags_1_clr_any_wait)1537 int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations,
1538 uint8_t num_match_iterations,
1539 uint8_t delay_per_iteration_us,
1540 uint8_t delay_before_iterations_us,
1541 uint32_t status_flags_1_set_all_wait,
1542 uint32_t status_flags_1_set_any_wait,
1543 uint32_t status_flags_1_clr_all_wait,
1544 uint32_t status_flags_1_clr_any_wait)
1545 {
1546 struct ti_sci_msg_req_wait_proc_boot_status req;
1547 struct ti_sci_msg_hdr resp;
1548
1549 struct ti_sci_xfer xfer;
1550 int ret;
1551
1552 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1553 &req, sizeof(req),
1554 &resp, sizeof(resp),
1555 &xfer);
1556 if (ret) {
1557 ERROR("Message alloc failed (%d)\n", ret);
1558 return ret;
1559 }
1560
1561 req.processor_id = proc_id;
1562 req.num_wait_iterations = num_wait_iterations;
1563 req.num_match_iterations = num_match_iterations;
1564 req.delay_per_iteration_us = delay_per_iteration_us;
1565 req.delay_before_iterations_us = delay_before_iterations_us;
1566 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1567 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1568 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1569 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1570
1571 ret = ti_sci_do_xfer(&xfer);
1572 if (ret) {
1573 ERROR("Transfer send failed (%d)\n", ret);
1574 return ret;
1575 }
1576
1577 return 0;
1578 }
1579
1580 /**
1581 * ti_sci_proc_wait_boot_status_no_wait() - Wait for a processor boot status
1582 * without requesting or waiting for
1583 * a response.
1584 *
1585 * @proc_id: Processor ID this request is for
1586 * @num_wait_iterations Total number of iterations we will check before
1587 * we will timeout and give up
1588 * @num_match_iterations How many iterations should we have continued
1589 * status to account for status bits glitching.
1590 * This is to make sure that match occurs for
1591 * consecutive checks. This implies that the
1592 * worst case should consider that the stable
1593 * time should at the worst be num_wait_iterations
1594 * num_match_iterations to prevent timeout.
1595 * @delay_per_iteration_us Specifies how long to wait (in micro seconds)
1596 * between each status checks. This is the minimum
1597 * duration, and overhead of register reads and
1598 * checks are on top of this and can vary based on
1599 * varied conditions.
1600 * @delay_before_iterations_us Specifies how long to wait (in micro seconds)
1601 * before the very first check in the first
1602 * iteration of status check loop. This is the
1603 * minimum duration, and overhead of register
1604 * reads and checks are.
1605 * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the
1606 * status matching this field requested MUST be 1.
1607 * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the
1608 * bits matching this field requested MUST be 1.
1609 * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the
1610 * status matching this field requested MUST be 0.
1611 * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the
1612 * bits matching this field requested MUST be 0.
1613 *
1614 * Return: 0 if all goes well, else appropriate error message
1615 */
ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,uint8_t num_wait_iterations,uint8_t num_match_iterations,uint8_t delay_per_iteration_us,uint8_t delay_before_iterations_us,uint32_t status_flags_1_set_all_wait,uint32_t status_flags_1_set_any_wait,uint32_t status_flags_1_clr_all_wait,uint32_t status_flags_1_clr_any_wait)1616 int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,
1617 uint8_t num_wait_iterations,
1618 uint8_t num_match_iterations,
1619 uint8_t delay_per_iteration_us,
1620 uint8_t delay_before_iterations_us,
1621 uint32_t status_flags_1_set_all_wait,
1622 uint32_t status_flags_1_set_any_wait,
1623 uint32_t status_flags_1_clr_all_wait,
1624 uint32_t status_flags_1_clr_any_wait)
1625 {
1626 struct ti_sci_msg_req_wait_proc_boot_status req;
1627 struct ti_sci_msg_hdr *hdr;
1628 struct k3_sec_proxy_msg tx_message;
1629 int ret;
1630
1631 /* Ensure we have sane transfer size */
1632 if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE)
1633 return -ERANGE;
1634
1635 hdr = (struct ti_sci_msg_hdr *)&req;
1636 hdr->seq = ++message_sequence;
1637 hdr->type = TISCI_MSG_WAIT_PROC_BOOT_STATUS;
1638 hdr->host = TI_SCI_HOST_ID;
1639 /* Setup with NORESPONSE flag to keep response queue clean */
1640 hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE;
1641
1642 req.processor_id = proc_id;
1643 req.num_wait_iterations = num_wait_iterations;
1644 req.num_match_iterations = num_match_iterations;
1645 req.delay_per_iteration_us = delay_per_iteration_us;
1646 req.delay_before_iterations_us = delay_before_iterations_us;
1647 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1648 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1649 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1650 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1651
1652 tx_message.buf = (uint8_t *)&req;
1653 tx_message.len = sizeof(req);
1654
1655 /* Send message */
1656 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message);
1657 if (ret) {
1658 ERROR("Message sending failed (%d)\n", ret);
1659 return ret;
1660 }
1661
1662 /* Return without waiting for response */
1663 return 0;
1664 }
1665
1666 /**
1667 * ti_sci_enter_sleep - Command to initiate system transition into suspend.
1668 *
1669 * @proc_id: Processor ID.
1670 * @mode: Low power mode to enter.
1671 * @core_resume_addr: Address that core should be
1672 * resumed from after low power transition.
1673 *
1674 * Return: 0 if all goes well, else appropriate error message
1675 */
ti_sci_enter_sleep(uint8_t proc_id,uint8_t mode,uint64_t core_resume_addr)1676 int ti_sci_enter_sleep(uint8_t proc_id,
1677 uint8_t mode,
1678 uint64_t core_resume_addr)
1679 {
1680 struct ti_sci_msg_req_enter_sleep req;
1681 struct ti_sci_msg_hdr *hdr;
1682 struct k3_sec_proxy_msg tx_message;
1683 int ret;
1684
1685 /* Ensure we have sane transfer size */
1686 if (sizeof(req) > TI_SCI_MAX_MESSAGE_SIZE) {
1687 return -ERANGE;
1688 }
1689
1690 hdr = (struct ti_sci_msg_hdr *)&req;
1691 hdr->seq = ++message_sequence;
1692 hdr->type = TI_SCI_MSG_ENTER_SLEEP;
1693 hdr->host = TI_SCI_HOST_ID;
1694 /* Setup with NORESPONSE flag to keep response queue clean */
1695 hdr->flags = TI_SCI_FLAG_REQ_GENERIC_NORESPONSE;
1696
1697 req.processor_id = proc_id;
1698 req.mode = mode;
1699 req.core_resume_lo = core_resume_addr & TISCI_ADDR_LOW_MASK;
1700 req.core_resume_hi = (core_resume_addr & TISCI_ADDR_HIGH_MASK) >>
1701 TISCI_ADDR_HIGH_SHIFT;
1702
1703 tx_message.buf = (uint8_t *)&req;
1704 tx_message.len = sizeof(req);
1705
1706 /* Send message */
1707 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &tx_message);
1708 if (ret != 0) {
1709 ERROR("Message sending failed (%d)\n", ret);
1710 return ret;
1711 }
1712
1713 /* Return without waiting for response */
1714 return 0;
1715 }
1716
1717 /**
1718 * ti_sci_init() - Basic initialization
1719 *
1720 * Return: 0 if all goes well, else appropriate error message
1721 */
ti_sci_init(void)1722 int ti_sci_init(void)
1723 {
1724 struct ti_sci_msg_resp_version rev_info;
1725 int ret;
1726
1727 ret = ti_sci_get_revision(&rev_info);
1728 if (ret) {
1729 ERROR("Unable to communicate with control firmware (%d)\n", ret);
1730 return ret;
1731 }
1732
1733 INFO("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
1734 rev_info.abi_major, rev_info.abi_minor,
1735 rev_info.firmware_revision,
1736 rev_info.firmware_description);
1737
1738 return 0;
1739 }
1740