1 /*
2 * Texas Instruments System Control Interface Driver
3 * Based on Linux and U-Boot implementation
4 *
5 * Copyright (C) 2018-2025 Texas Instruments Incorporated - https://www.ti.com/
6 *
7 * SPDX-License-Identifier: BSD-3-Clause
8 */
9
10 #include <assert.h>
11 #include <errno.h>
12 #include <stdbool.h>
13 #include <stddef.h>
14 #include <string.h>
15
16 #include <platform_def.h>
17 #include <lib/bakery_lock.h>
18
19 #include <common/debug.h>
20 #include <ti_sci_transport.h>
21
22 #include "ti_sci_protocol.h"
23 #include "ti_sci.h"
24
#if USE_COHERENT_MEM
__section(".tzfw_coherent_mem")
#endif
/* Monotonic sequence number stamped into each outgoing message header */
static uint8_t message_sequence;

/* Serializes the full send/receive transaction in ti_sci_do_xfer() */
DEFINE_BAKERY_LOCK(ti_sci_xfer_lock);
31
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message: Transmit message
 * @rx_message: Receive message (len == 0 when no response is requested)
 */
struct ti_sci_xfer {
	struct ti_sci_msg tx_message;
	struct ti_sci_msg rx_message;
};
41
42 /**
43 * ti_sci_setup_one_xfer() - Setup one message type
44 *
45 * @msg_type: Message type
46 * @msg_flags: Flag to set for the message
47 * @tx_buf: Buffer to be sent to mailbox channel
48 * @tx_message_size: transmit message size
49 * @rx_buf: Buffer to be received from mailbox channel
50 * @rx_message_size: receive message size
51 *
52 * Helper function which is used by various command functions that are
53 * exposed to clients of this driver for allocating a message traffic event.
54 *
55 * Return: 0 if all goes well, else appropriate error message
56 */
ti_sci_setup_one_xfer(uint16_t msg_type,uint32_t msg_flags,void * tx_buf,size_t tx_message_size,void * rx_buf,size_t rx_message_size,struct ti_sci_xfer * xfer)57 static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags,
58 void *tx_buf,
59 size_t tx_message_size,
60 void *rx_buf,
61 size_t rx_message_size,
62 struct ti_sci_xfer *xfer)
63 {
64 struct ti_sci_msg_hdr *hdr;
65
66 /* Ensure we have sane transfer sizes */
67 if (rx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
68 tx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
69 tx_message_size < sizeof(*hdr))
70 return -ERANGE;
71
72 hdr = (struct ti_sci_msg_hdr *)tx_buf;
73
74 /* TODO: Calculate checksum */
75 hdr->sec_hdr.checksum = 0;
76 hdr->seq = ++message_sequence;
77 hdr->type = msg_type;
78 hdr->host = TI_SCI_HOST_ID;
79 hdr->flags = msg_flags;
80 /* Request a response if rx_message_size is non-zero */
81 if (rx_message_size != 0U) {
82 hdr->flags |= TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
83 }
84
85 xfer->tx_message.buf = tx_buf;
86 xfer->tx_message.len = tx_message_size;
87
88 xfer->rx_message.buf = rx_buf;
89 xfer->rx_message.len = rx_message_size;
90
91 return 0;
92 }
93
94 /**
95 * ti_sci_get_response() - Receive response from mailbox channel
96 *
97 * @xfer: Transfer to initiate and wait for response
98 * @chan: Channel to receive the response
99 *
100 * Return: 0 if all goes well, else appropriate error message
101 */
ti_sci_get_response(struct ti_sci_msg * msg,enum ti_sci_transport_chan_id chan)102 static int ti_sci_get_response(struct ti_sci_msg *msg,
103 enum ti_sci_transport_chan_id chan)
104 {
105 struct ti_sci_msg_hdr *hdr;
106 unsigned int retry = 5;
107 int ret;
108
109 for (; retry > 0; retry--) {
110 /* Receive the response */
111 ret = ti_sci_transport_recv(chan, msg);
112 if (ret) {
113 ERROR("Message receive failed (%d)\n", ret);
114 return ret;
115 }
116
117 /* msg is updated by Secure Proxy driver */
118 hdr = (struct ti_sci_msg_hdr *)msg->buf;
119
120 /* Sanity check for message response */
121 if (hdr->seq == message_sequence)
122 break;
123 else
124 WARN("Message with sequence ID %u is not expected\n", hdr->seq);
125 }
126 if (!retry) {
127 ERROR("Timed out waiting for message\n");
128 return -EINVAL;
129 }
130
131 if (msg->len > TI_SCI_MAX_MESSAGE_SIZE) {
132 ERROR("Unable to handle %lu xfer (max %d)\n",
133 msg->len, TI_SCI_MAX_MESSAGE_SIZE);
134 return -EINVAL;
135 }
136
137 if (!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
138 return -ENODEV;
139
140 /* TODO: Verify checksum */
141 (void)hdr->sec_hdr.checksum;
142
143 return 0;
144 }
145
146 /**
147 * ti_sci_do_xfer() - Do one transfer
148 *
149 * @xfer: Transfer to initiate and wait for response
150 *
151 * Return: 0 if all goes well, else appropriate error message
152 */
ti_sci_do_xfer(struct ti_sci_xfer * xfer)153 static int ti_sci_do_xfer(struct ti_sci_xfer *xfer)
154 {
155 struct ti_sci_msg *tx_msg = &xfer->tx_message;
156 struct ti_sci_msg *rx_msg = &xfer->rx_message;
157 int ret;
158
159 bakery_lock_get(&ti_sci_xfer_lock);
160
161 /* Clear any spurious messages in receive queue */
162 ret = ti_sci_transport_clear_rx_thread(RX_SECURE_TRANSPORT_CHANNEL_ID);
163 if (ret) {
164 ERROR("Could not clear response queue (%d)\n", ret);
165 goto unlock;
166 }
167
168 /* Send the message */
169 ret = ti_sci_transport_send(TX_SECURE_TRANSPORT_CHANNEL_ID, tx_msg);
170 if (ret) {
171 ERROR("Message sending failed (%d)\n", ret);
172 goto unlock;
173 }
174
175 /* Get the response if requested */
176 if (rx_msg->len != 0U) {
177 ret = ti_sci_get_response(rx_msg, RX_SECURE_TRANSPORT_CHANNEL_ID);
178 if (ret != 0U) {
179 ERROR("Failed to get response (%d)\n", ret);
180 goto unlock;
181 }
182 }
183
184 unlock:
185 bakery_lock_release(&ti_sci_xfer_lock);
186
187 return ret;
188 }
189
190 /**
191 * copy_revision_resp() - Copy the FW revision response into an internal struct
192 *
193 * @version: Structure containing the version info
194 * @rev_info: Response for firmware version information message
195 */
copy_revision_resp(struct ti_sci_msg_version * version,struct ti_sci_msg_resp_version * rev_info)196 static void copy_revision_resp(struct ti_sci_msg_version *version,
197 struct ti_sci_msg_resp_version *rev_info)
198 {
199 memcpy(version->firmware_description, rev_info->firmware_description,
200 sizeof(rev_info->firmware_description));
201 version->abi_major = rev_info->abi_major;
202 version->abi_minor = rev_info->abi_minor;
203 version->firmware_revision = rev_info->firmware_revision;
204 version->sub_version = rev_info->sub_version;
205 version->patch_version = rev_info->patch_version;
206 }
207
208 /**
209 * ti_sci_get_revision() - Get the revision of the SCI entity
210 *
211 * Updates the SCI information in the internal data structure.
212 *
213 * @version: Structure containing the version info
214 *
215 * Return: 0 if all goes well, else appropriate error message
216 */
ti_sci_get_revision(struct ti_sci_msg_version * version)217 int ti_sci_get_revision(struct ti_sci_msg_version *version)
218 {
219 static struct ti_sci_msg_resp_version rev_info __aligned(4);
220 struct ti_sci_msg_hdr hdr;
221 struct ti_sci_xfer xfer;
222 static bool revision_cached;
223 int ret;
224
225 if (revision_cached) {
226 copy_revision_resp(version, &rev_info);
227 return 0;
228 }
229
230 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0,
231 &hdr, sizeof(hdr),
232 &rev_info, sizeof(rev_info),
233 &xfer);
234 if (ret) {
235 ERROR("Message alloc failed (%d)\n", ret);
236 return ret;
237 }
238
239 ret = ti_sci_do_xfer(&xfer);
240 if (ret) {
241 ERROR("Transfer send failed (%d)\n", ret);
242 return ret;
243 }
244
245 revision_cached = true;
246 copy_revision_resp(version, &rev_info);
247
248 return 0;
249 }
250
251 /**
252 * ti_sci_query_fw_caps() - Get the FW/SoC capabilities
253 * @fw_caps: Each bit in fw_caps indicating one FW/SOC capability
254 *
255 * Return: 0 if all went well, else returns appropriate error value.
256 */
ti_sci_query_fw_caps(uint64_t * fw_caps)257 int ti_sci_query_fw_caps(uint64_t *fw_caps)
258 {
259 struct ti_sci_msg_hdr req;
260 static struct ti_sci_msg_resp_query_fw_caps resp __aligned(4);
261 struct ti_sci_xfer xfer;
262 static bool caps_cached;
263 int ret = 0;
264
265 if (!fw_caps)
266 return -EINVAL;
267
268 if (caps_cached) {
269 *fw_caps = resp.fw_caps;
270 return 0;
271 }
272
273 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_FW_CAPS, 0,
274 &req, sizeof(req),
275 &resp, sizeof(resp),
276 &xfer);
277 if (ret != 0U) {
278 ERROR("Message alloc failed (%d)\n", ret);
279 return ret;
280 }
281
282 ret = ti_sci_do_xfer(&xfer);
283 if (ret != 0U) {
284 ERROR("Transfer send failed (%d)\n", ret);
285 return ret;
286 }
287
288 caps_cached = true;
289 *fw_caps = resp.fw_caps;
290
291 return 0;
292 }
293
294 /**
295 * ti_sci_device_set_state() - Set device state
296 *
297 * @id: Device identifier
298 * @flags: flags to setup for the device
299 * @state: State to move the device to
300 *
301 * Return: 0 if all goes well, else appropriate error message
302 */
ti_sci_device_set_state(uint32_t id,uint32_t flags,uint8_t state)303 static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state)
304 {
305 struct ti_sci_msg_req_set_device_state req;
306 struct ti_sci_msg_hdr resp;
307
308 struct ti_sci_xfer xfer;
309 int ret;
310
311 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, flags,
312 &req, sizeof(req),
313 &resp, sizeof(resp),
314 &xfer);
315 if (ret) {
316 ERROR("Message alloc failed (%d)\n", ret);
317 return ret;
318 }
319
320 req.id = id;
321 req.state = state;
322
323 ret = ti_sci_do_xfer(&xfer);
324 if (ret) {
325 ERROR("Transfer send failed (%d)\n", ret);
326 return ret;
327 }
328
329 return 0;
330 }
331
332 /**
333 * ti_sci_device_get_state() - Get device state
334 *
335 * @id: Device Identifier
336 * @clcnt: Pointer to Context Loss Count
337 * @resets: pointer to resets
338 * @p_state: pointer to p_state
339 * @c_state: pointer to c_state
340 *
341 * Return: 0 if all goes well, else appropriate error message
342 */
ti_sci_device_get_state(uint32_t id,uint32_t * clcnt,uint32_t * resets,uint8_t * p_state,uint8_t * c_state)343 static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt,
344 uint32_t *resets, uint8_t *p_state,
345 uint8_t *c_state)
346 {
347 struct ti_sci_msg_req_get_device_state req;
348 struct ti_sci_msg_resp_get_device_state resp;
349
350 struct ti_sci_xfer xfer;
351 int ret;
352
353 if (!clcnt && !resets && !p_state && !c_state)
354 return -EINVAL;
355
356 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0,
357 &req, sizeof(req),
358 &resp, sizeof(resp),
359 &xfer);
360 if (ret) {
361 ERROR("Message alloc failed (%d)\n", ret);
362 return ret;
363 }
364
365 req.id = id;
366
367 ret = ti_sci_do_xfer(&xfer);
368 if (ret) {
369 ERROR("Transfer send failed (%d)\n", ret);
370 return ret;
371 }
372
373 if (clcnt)
374 *clcnt = resp.context_loss_count;
375 if (resets)
376 *resets = resp.resets;
377 if (p_state)
378 *p_state = resp.programmed_state;
379 if (c_state)
380 *c_state = resp.current_state;
381
382 return 0;
383 }
384
385 /**
386 * ti_sci_device_get() - Request for device managed by TISCI
387 *
388 * @id: Device Identifier
389 *
390 * Request for the device - NOTE: the client MUST maintain integrity of
391 * usage count by balancing get_device with put_device. No refcounting is
392 * managed by driver for that purpose.
393 *
394 * Return: 0 if all goes well, else appropriate error message
395 */
ti_sci_device_get(uint32_t id)396 int ti_sci_device_get(uint32_t id)
397 {
398 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON);
399 }
400
401 /**
402 * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI
403 *
404 * @id: Device Identifier
405 *
406 * Request for the device - NOTE: the client MUST maintain integrity of
407 * usage count by balancing get_device with put_device. No refcounting is
408 * managed by driver for that purpose.
409 *
410 * NOTE: This _exclusive version of the get API is for exclusive access to the
411 * device. Any other host in the system will fail to get this device after this
412 * call until exclusive access is released with device_put or a non-exclusive
413 * set call.
414 *
415 * Return: 0 if all goes well, else appropriate error message
416 */
ti_sci_device_get_exclusive(uint32_t id)417 int ti_sci_device_get_exclusive(uint32_t id)
418 {
419 return ti_sci_device_set_state(id,
420 MSG_FLAG_DEVICE_EXCLUSIVE,
421 MSG_DEVICE_SW_STATE_ON);
422 }
423
424 /**
425 * ti_sci_device_idle() - Idle a device managed by TISCI
426 *
427 * @id: Device Identifier
428 *
429 * Request for the device - NOTE: the client MUST maintain integrity of
430 * usage count by balancing get_device with put_device. No refcounting is
431 * managed by driver for that purpose.
432 *
433 * Return: 0 if all goes well, else appropriate error message
434 */
ti_sci_device_idle(uint32_t id)435 int ti_sci_device_idle(uint32_t id)
436 {
437 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION);
438 }
439
440 /**
441 * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI
442 *
443 * @id: Device Identifier
444 *
445 * Request for the device - NOTE: the client MUST maintain integrity of
446 * usage count by balancing get_device with put_device. No refcounting is
447 * managed by driver for that purpose.
448 *
449 * NOTE: This _exclusive version of the idle API is for exclusive access to
450 * the device. Any other host in the system will fail to get this device after
451 * this call until exclusive access is released with device_put or a
452 * non-exclusive set call.
453 *
454 * Return: 0 if all goes well, else appropriate error message
455 */
ti_sci_device_idle_exclusive(uint32_t id)456 int ti_sci_device_idle_exclusive(uint32_t id)
457 {
458 return ti_sci_device_set_state(id,
459 MSG_FLAG_DEVICE_EXCLUSIVE,
460 MSG_DEVICE_SW_STATE_RETENTION);
461 }
462
463 /**
464 * ti_sci_device_put() - Release a device managed by TISCI
465 *
466 * @id: Device Identifier
467 *
468 * Request for the device - NOTE: the client MUST maintain integrity of
469 * usage count by balancing get_device with put_device. No refcounting is
470 * managed by driver for that purpose.
471 *
472 * Return: 0 if all goes well, else appropriate error message
473 */
ti_sci_device_put(uint32_t id)474 int ti_sci_device_put(uint32_t id)
475 {
476 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
477 }
478
479 /**
480 * ti_sci_device_put_no_wait() - Release a device without requesting or waiting
481 * for a response.
482 *
483 * @id: Device Identifier
484 *
485 * Request for the device - NOTE: the client MUST maintain integrity of
486 * usage count by balancing get_device with put_device. No refcounting is
487 * managed by driver for that purpose.
488 *
489 * Return: 0 if all goes well, else appropriate error message
490 */
ti_sci_device_put_no_wait(uint32_t id)491 int ti_sci_device_put_no_wait(uint32_t id)
492 {
493 struct ti_sci_msg_req_set_device_state req;
494 struct ti_sci_xfer xfer;
495 int ret;
496
497 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, 0,
498 &req, sizeof(req),
499 NULL, 0,
500 &xfer);
501 if (ret != 0U) {
502 ERROR("Message alloc failed (%d)\n", ret);
503 return ret;
504 }
505
506 req.id = id;
507 req.state = MSG_DEVICE_SW_STATE_AUTO_OFF;
508
509 ret = ti_sci_do_xfer(&xfer);
510 if (ret != 0U) {
511 ERROR("Transfer send failed (%d)\n", ret);
512 return ret;
513 }
514
515 return 0;
516 }
517
/**
 * ti_sci_device_is_valid() - Is the device valid
 *
 * @id: Device Identifier
 *
 * Return: 0 if all goes well and the device ID is valid, else return
 *	   appropriate error
 */
int ti_sci_device_is_valid(uint32_t id)
{
	uint8_t discard;

	/* Querying any state succeeds only for a valid device ID */
	return ti_sci_device_get_state(id, NULL, NULL, NULL, &discard);
}
533
/**
 * ti_sci_device_get_clcnt() - Get context loss counter
 *
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count)
{
	/* Only the context-loss-count field of the state query is used */
	return ti_sci_device_get_state(id, count, NULL, NULL, NULL);
}
546
547 /**
548 * ti_sci_device_is_idle() - Check if the device is requested to be idle
549 *
550 * @id: Device Identifier
551 * @r_state: true if requested to be idle
552 *
553 * Return: 0 if all goes well, else appropriate error message
554 */
ti_sci_device_is_idle(uint32_t id,bool * r_state)555 int ti_sci_device_is_idle(uint32_t id, bool *r_state)
556 {
557 int ret;
558 uint8_t state;
559
560 if (!r_state)
561 return -EINVAL;
562
563 ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL);
564 if (ret)
565 return ret;
566
567 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
568
569 return 0;
570 }
571
572 /**
573 * ti_sci_device_is_stop() - Check if the device is requested to be stopped
574 *
575 * @id: Device Identifier
576 * @r_state: true if requested to be stopped
577 * @curr_state: true if currently stopped
578 *
579 * Return: 0 if all goes well, else appropriate error message
580 */
ti_sci_device_is_stop(uint32_t id,bool * r_state,bool * curr_state)581 int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state)
582 {
583 int ret;
584 uint8_t p_state, c_state;
585
586 if (!r_state && !curr_state)
587 return -EINVAL;
588
589 ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
590 if (ret)
591 return ret;
592
593 if (r_state)
594 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
595 if (curr_state)
596 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
597
598 return 0;
599 }
600
601 /**
602 * ti_sci_device_is_on() - Check if the device is requested to be ON
603 *
604 * @id: Device Identifier
605 * @r_state: true if requested to be ON
606 * @curr_state: true if currently ON and active
607 *
608 * Return: 0 if all goes well, else appropriate error message
609 */
ti_sci_device_is_on(uint32_t id,bool * r_state,bool * curr_state)610 int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state)
611 {
612 int ret;
613 uint8_t p_state, c_state;
614
615 if (!r_state && !curr_state)
616 return -EINVAL;
617
618 ret =
619 ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
620 if (ret)
621 return ret;
622
623 if (r_state)
624 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
625 if (curr_state)
626 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
627
628 return 0;
629 }
630
631 /**
632 * ti_sci_device_is_trans() - Check if the device is currently transitioning
633 *
634 * @id: Device Identifier
635 * @curr_state: true if currently transitioning
636 *
637 * Return: 0 if all goes well, else appropriate error message
638 */
ti_sci_device_is_trans(uint32_t id,bool * curr_state)639 int ti_sci_device_is_trans(uint32_t id, bool *curr_state)
640 {
641 int ret;
642 uint8_t state;
643
644 if (!curr_state)
645 return -EINVAL;
646
647 ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state);
648 if (ret)
649 return ret;
650
651 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
652
653 return 0;
654 }
655
656 /**
657 * ti_sci_device_set_resets() - Set resets for device managed by TISCI
658 *
659 * @id: Device Identifier
660 * @reset_state: Device specific reset bit field
661 *
662 * Return: 0 if all goes well, else appropriate error message
663 */
ti_sci_device_set_resets(uint32_t id,uint32_t reset_state)664 int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state)
665 {
666 struct ti_sci_msg_req_set_device_resets req;
667 struct ti_sci_msg_hdr resp;
668
669 struct ti_sci_xfer xfer;
670 int ret;
671
672 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS, 0,
673 &req, sizeof(req),
674 &resp, sizeof(resp),
675 &xfer);
676 if (ret) {
677 ERROR("Message alloc failed (%d)\n", ret);
678 return ret;
679 }
680
681 req.id = id;
682 req.resets = reset_state;
683
684 ret = ti_sci_do_xfer(&xfer);
685 if (ret) {
686 ERROR("Transfer send failed (%d)\n", ret);
687 return ret;
688 }
689
690 return 0;
691 }
692
/**
 * ti_sci_device_get_resets() - Get reset state for device managed by TISCI
 *
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state)
{
	/* Only the resets field of the state query is used */
	return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL);
}
705
706 /**
707 * ti_sci_clock_set_state() - Set clock state helper
708 *
709 * @dev_id: Device identifier this request is for
710 * @clk_id: Clock identifier for the device for this request,
711 * Each device has its own set of clock inputs, This indexes
712 * which clock input to modify
713 * @flags: Header flags as needed
714 * @state: State to request for the clock
715 *
716 * Return: 0 if all goes well, else appropriate error message
717 */
ti_sci_clock_set_state(uint32_t dev_id,uint8_t clk_id,uint32_t flags,uint8_t state)718 int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id,
719 uint32_t flags, uint8_t state)
720 {
721 struct ti_sci_msg_req_set_clock_state req;
722 struct ti_sci_msg_hdr resp;
723
724 struct ti_sci_xfer xfer;
725 int ret;
726
727 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE, flags,
728 &req, sizeof(req),
729 &resp, sizeof(resp),
730 &xfer);
731 if (ret) {
732 ERROR("Message alloc failed (%d)\n", ret);
733 return ret;
734 }
735
736 req.dev_id = dev_id;
737 req.clk_id = clk_id;
738 req.request_state = state;
739
740 ret = ti_sci_do_xfer(&xfer);
741 if (ret) {
742 ERROR("Transfer send failed (%d)\n", ret);
743 return ret;
744 }
745
746 return 0;
747 }
748
749 /**
750 * ti_sci_clock_get_state() - Get clock state helper
751 *
752 * @dev_id: Device identifier this request is for
753 * @clk_id: Clock identifier for the device for this request.
754 * Each device has its own set of clock inputs. This indexes
755 * which clock input to modify.
756 * @programmed_state: State requested for clock to move to
757 * @current_state: State that the clock is currently in
758 *
759 * Return: 0 if all goes well, else appropriate error message
760 */
ti_sci_clock_get_state(uint32_t dev_id,uint8_t clk_id,uint8_t * programmed_state,uint8_t * current_state)761 int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id,
762 uint8_t *programmed_state,
763 uint8_t *current_state)
764 {
765 struct ti_sci_msg_req_get_clock_state req;
766 struct ti_sci_msg_resp_get_clock_state resp;
767
768 struct ti_sci_xfer xfer;
769 int ret;
770
771 if (!programmed_state && !current_state)
772 return -EINVAL;
773
774 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE, 0,
775 &req, sizeof(req),
776 &resp, sizeof(resp),
777 &xfer);
778 if (ret) {
779 ERROR("Message alloc failed (%d)\n", ret);
780 return ret;
781 }
782
783 req.dev_id = dev_id;
784 req.clk_id = clk_id;
785
786 ret = ti_sci_do_xfer(&xfer);
787 if (ret) {
788 ERROR("Transfer send failed (%d)\n", ret);
789 return ret;
790 }
791
792 if (programmed_state)
793 *programmed_state = resp.programmed_state;
794 if (current_state)
795 *current_state = resp.current_state;
796
797 return 0;
798 }
799
800 /**
801 * ti_sci_clock_get() - Get control of a clock from TI SCI
802
803 * @dev_id: Device identifier this request is for
804 * @clk_id: Clock identifier for the device for this request.
805 * Each device has its own set of clock inputs. This indexes
806 * which clock input to modify.
807 * @needs_ssc: 'true' iff Spread Spectrum clock is desired
808 * @can_change_freq: 'true' iff frequency change is desired
809 * @enable_input_term: 'true' iff input termination is desired
810 *
811 * Return: 0 if all goes well, else appropriate error message
812 */
ti_sci_clock_get(uint32_t dev_id,uint8_t clk_id,bool needs_ssc,bool can_change_freq,bool enable_input_term)813 int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id,
814 bool needs_ssc, bool can_change_freq,
815 bool enable_input_term)
816 {
817 uint32_t flags = 0;
818
819 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
820 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
821 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
822
823 return ti_sci_clock_set_state(dev_id, clk_id, flags,
824 MSG_CLOCK_SW_STATE_REQ);
825 }
826
827 /**
828 * ti_sci_clock_idle() - Idle a clock which is in our control
829
830 * @dev_id: Device identifier this request is for
831 * @clk_id: Clock identifier for the device for this request.
832 * Each device has its own set of clock inputs. This indexes
833 * which clock input to modify.
834 *
835 * NOTE: This clock must have been requested by get_clock previously.
836 *
837 * Return: 0 if all goes well, else appropriate error message
838 */
ti_sci_clock_idle(uint32_t dev_id,uint8_t clk_id)839 int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id)
840 {
841 return ti_sci_clock_set_state(dev_id, clk_id, 0,
842 MSG_CLOCK_SW_STATE_UNREQ);
843 }
844
845 /**
846 * ti_sci_clock_put() - Release a clock from our control
847 *
848 * @dev_id: Device identifier this request is for
849 * @clk_id: Clock identifier for the device for this request.
850 * Each device has its own set of clock inputs. This indexes
851 * which clock input to modify.
852 *
853 * NOTE: This clock must have been requested by get_clock previously.
854 *
855 * Return: 0 if all goes well, else appropriate error message
856 */
ti_sci_clock_put(uint32_t dev_id,uint8_t clk_id)857 int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id)
858 {
859 return ti_sci_clock_set_state(dev_id, clk_id, 0,
860 MSG_CLOCK_SW_STATE_AUTO);
861 }
862
863 /**
864 * ti_sci_clock_is_auto() - Is the clock being auto managed
865 *
866 * @dev_id: Device identifier this request is for
867 * @clk_id: Clock identifier for the device for this request.
868 * Each device has its own set of clock inputs. This indexes
869 * which clock input to modify.
870 * @req_state: state indicating if the clock is auto managed
871 *
872 * Return: 0 if all goes well, else appropriate error message
873 */
ti_sci_clock_is_auto(uint32_t dev_id,uint8_t clk_id,bool * req_state)874 int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state)
875 {
876 uint8_t state = 0;
877 int ret;
878
879 if (!req_state)
880 return -EINVAL;
881
882 ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL);
883 if (ret)
884 return ret;
885
886 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
887
888 return 0;
889 }
890
891 /**
892 * ti_sci_clock_is_on() - Is the clock ON
893 *
894 * @dev_id: Device identifier this request is for
895 * @clk_id: Clock identifier for the device for this request.
896 * Each device has its own set of clock inputs. This indexes
897 * which clock input to modify.
898 * @req_state: state indicating if the clock is managed by us and enabled
899 * @curr_state: state indicating if the clock is ready for operation
900 *
901 * Return: 0 if all goes well, else appropriate error message
902 */
ti_sci_clock_is_on(uint32_t dev_id,uint8_t clk_id,bool * req_state,bool * curr_state)903 int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id,
904 bool *req_state, bool *curr_state)
905 {
906 uint8_t c_state = 0, r_state = 0;
907 int ret;
908
909 if (!req_state && !curr_state)
910 return -EINVAL;
911
912 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
913 if (ret)
914 return ret;
915
916 if (req_state)
917 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
918 if (curr_state)
919 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
920
921 return 0;
922 }
923
924 /**
925 * ti_sci_clock_is_off() - Is the clock OFF
926 *
927 * @dev_id: Device identifier this request is for
928 * @clk_id: Clock identifier for the device for this request.
929 * Each device has its own set of clock inputs. This indexes
930 * which clock input to modify.
931 * @req_state: state indicating if the clock is managed by us and disabled
932 * @curr_state: state indicating if the clock is NOT ready for operation
933 *
934 * Return: 0 if all goes well, else appropriate error message
935 */
ti_sci_clock_is_off(uint32_t dev_id,uint8_t clk_id,bool * req_state,bool * curr_state)936 int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id,
937 bool *req_state, bool *curr_state)
938 {
939 uint8_t c_state = 0, r_state = 0;
940 int ret;
941
942 if (!req_state && !curr_state)
943 return -EINVAL;
944
945 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
946 if (ret)
947 return ret;
948
949 if (req_state)
950 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
951 if (curr_state)
952 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
953
954 return 0;
955 }
956
957 /**
958 * ti_sci_clock_set_parent() - Set the clock source of a specific device clock
959 *
960 * @dev_id: Device identifier this request is for
961 * @clk_id: Clock identifier for the device for this request.
962 * Each device has its own set of clock inputs. This indexes
963 * which clock input to modify.
964 * @parent_id: Parent clock identifier to set
965 *
966 * Return: 0 if all goes well, else appropriate error message
967 */
ti_sci_clock_set_parent(uint32_t dev_id,uint8_t clk_id,uint8_t parent_id)968 int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id)
969 {
970 struct ti_sci_msg_req_set_clock_parent req;
971 struct ti_sci_msg_hdr resp;
972
973 struct ti_sci_xfer xfer;
974 int ret;
975
976 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT, 0,
977 &req, sizeof(req),
978 &resp, sizeof(resp),
979 &xfer);
980 if (ret) {
981 ERROR("Message alloc failed (%d)\n", ret);
982 return ret;
983 }
984
985 req.dev_id = dev_id;
986 req.clk_id = clk_id;
987 req.parent_id = parent_id;
988
989 ret = ti_sci_do_xfer(&xfer);
990 if (ret) {
991 ERROR("Transfer send failed (%d)\n", ret);
992 return ret;
993 }
994
995 return 0;
996 }
997
998 /**
999 * ti_sci_clock_get_parent() - Get current parent clock source
1000 *
1001 * @dev_id: Device identifier this request is for
1002 * @clk_id: Clock identifier for the device for this request.
1003 * Each device has its own set of clock inputs. This indexes
1004 * which clock input to modify.
1005 * @parent_id: Current clock parent
1006 *
1007 * Return: 0 if all goes well, else appropriate error message
1008 */
ti_sci_clock_get_parent(uint32_t dev_id,uint8_t clk_id,uint8_t * parent_id)1009 int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id)
1010 {
1011 struct ti_sci_msg_req_get_clock_parent req;
1012 struct ti_sci_msg_resp_get_clock_parent resp;
1013
1014 struct ti_sci_xfer xfer;
1015 int ret;
1016
1017 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT, 0,
1018 &req, sizeof(req),
1019 &resp, sizeof(resp),
1020 &xfer);
1021 if (ret) {
1022 ERROR("Message alloc failed (%d)\n", ret);
1023 return ret;
1024 }
1025
1026 req.dev_id = dev_id;
1027 req.clk_id = clk_id;
1028
1029 ret = ti_sci_do_xfer(&xfer);
1030 if (ret) {
1031 ERROR("Transfer send failed (%d)\n", ret);
1032 return ret;
1033 }
1034
1035 *parent_id = resp.parent_id;
1036
1037 return 0;
1038 }
1039
1040 /**
1041 * ti_sci_clock_get_num_parents() - Get num parents of the current clk source
1042 *
1043 * @dev_id: Device identifier this request is for
1044 * @clk_id: Clock identifier for the device for this request.
1045 * Each device has its own set of clock inputs. This indexes
1046 * which clock input to modify.
1047 * @num_parents: Returns he number of parents to the current clock.
1048 *
1049 * Return: 0 if all goes well, else appropriate error message
1050 */
ti_sci_clock_get_num_parents(uint32_t dev_id,uint8_t clk_id,uint8_t * num_parents)1051 int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id,
1052 uint8_t *num_parents)
1053 {
1054 struct ti_sci_msg_req_get_clock_num_parents req;
1055 struct ti_sci_msg_resp_get_clock_num_parents resp;
1056
1057 struct ti_sci_xfer xfer;
1058 int ret;
1059
1060 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 0,
1061 &req, sizeof(req),
1062 &resp, sizeof(resp),
1063 &xfer);
1064 if (ret) {
1065 ERROR("Message alloc failed (%d)\n", ret);
1066 return ret;
1067 }
1068
1069 req.dev_id = dev_id;
1070 req.clk_id = clk_id;
1071
1072 ret = ti_sci_do_xfer(&xfer);
1073 if (ret) {
1074 ERROR("Transfer send failed (%d)\n", ret);
1075 return ret;
1076 }
1077
1078 *num_parents = resp.num_parents;
1079
1080 return 0;
1081 }
1082
1083 /**
1084 * ti_sci_clock_get_match_freq() - Find a good match for frequency
1085 *
1086 * @dev_id: Device identifier this request is for
1087 * @clk_id: Clock identifier for the device for this request.
1088 * Each device has its own set of clock inputs. This indexes
1089 * which clock input to modify.
1090 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1091 * allowable programmed frequency and does not account for clock
1092 * tolerances and jitter.
1093 * @target_freq: The target clock frequency in Hz. A frequency will be
1094 * processed as close to this target frequency as possible.
1095 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1096 * allowable programmed frequency and does not account for clock
1097 * tolerances and jitter.
1098 * @match_freq: Frequency match in Hz response.
1099 *
1100 * Return: 0 if all goes well, else appropriate error message
1101 */
ti_sci_clock_get_match_freq(uint32_t dev_id,uint8_t clk_id,uint64_t min_freq,uint64_t target_freq,uint64_t max_freq,uint64_t * match_freq)1102 int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id,
1103 uint64_t min_freq, uint64_t target_freq,
1104 uint64_t max_freq, uint64_t *match_freq)
1105 {
1106 struct ti_sci_msg_req_query_clock_freq req;
1107 struct ti_sci_msg_resp_query_clock_freq resp;
1108
1109 struct ti_sci_xfer xfer;
1110 int ret;
1111
1112 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ, 0,
1113 &req, sizeof(req),
1114 &resp, sizeof(resp),
1115 &xfer);
1116 if (ret) {
1117 ERROR("Message alloc failed (%d)\n", ret);
1118 return ret;
1119 }
1120
1121 req.dev_id = dev_id;
1122 req.clk_id = clk_id;
1123 req.min_freq_hz = min_freq;
1124 req.target_freq_hz = target_freq;
1125 req.max_freq_hz = max_freq;
1126
1127 ret = ti_sci_do_xfer(&xfer);
1128 if (ret) {
1129 ERROR("Transfer send failed (%d)\n", ret);
1130 return ret;
1131 }
1132
1133 *match_freq = resp.freq_hz;
1134
1135 return 0;
1136 }
1137
1138 /**
1139 * ti_sci_clock_set_freq() - Set a frequency for clock
1140 *
1141 * @dev_id: Device identifier this request is for
1142 * @clk_id: Clock identifier for the device for this request.
1143 * Each device has its own set of clock inputs. This indexes
1144 * which clock input to modify.
1145 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1146 * allowable programmed frequency and does not account for clock
1147 * tolerances and jitter.
1148 * @target_freq: The target clock frequency in Hz. A frequency will be
1149 * processed as close to this target frequency as possible.
1150 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1151 * allowable programmed frequency and does not account for clock
1152 * tolerances and jitter.
1153 *
1154 * Return: 0 if all goes well, else appropriate error message
1155 */
ti_sci_clock_set_freq(uint32_t dev_id,uint8_t clk_id,uint64_t min_freq,uint64_t target_freq,uint64_t max_freq)1156 int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq,
1157 uint64_t target_freq, uint64_t max_freq)
1158 {
1159 struct ti_sci_msg_req_set_clock_freq req;
1160 struct ti_sci_msg_hdr resp;
1161
1162 struct ti_sci_xfer xfer;
1163 int ret;
1164
1165 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ, 0,
1166 &req, sizeof(req),
1167 &resp, sizeof(resp),
1168 &xfer);
1169 if (ret) {
1170 ERROR("Message alloc failed (%d)\n", ret);
1171 return ret;
1172 }
1173 req.dev_id = dev_id;
1174 req.clk_id = clk_id;
1175 req.min_freq_hz = min_freq;
1176 req.target_freq_hz = target_freq;
1177 req.max_freq_hz = max_freq;
1178
1179 ret = ti_sci_do_xfer(&xfer);
1180 if (ret) {
1181 ERROR("Transfer send failed (%d)\n", ret);
1182 return ret;
1183 }
1184
1185 return 0;
1186 }
1187
1188 /**
1189 * ti_sci_clock_get_freq() - Get current frequency
1190 *
1191 * @dev_id: Device identifier this request is for
1192 * @clk_id: Clock identifier for the device for this request.
1193 * Each device has its own set of clock inputs. This indexes
1194 * which clock input to modify.
1195 * @freq: Currently frequency in Hz
1196 *
1197 * Return: 0 if all goes well, else appropriate error message
1198 */
ti_sci_clock_get_freq(uint32_t dev_id,uint8_t clk_id,uint64_t * freq)1199 int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq)
1200 {
1201 struct ti_sci_msg_req_get_clock_freq req;
1202 struct ti_sci_msg_resp_get_clock_freq resp;
1203
1204 struct ti_sci_xfer xfer;
1205 int ret;
1206
1207 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ, 0,
1208 &req, sizeof(req),
1209 &resp, sizeof(resp),
1210 &xfer);
1211 if (ret) {
1212 ERROR("Message alloc failed (%d)\n", ret);
1213 return ret;
1214 }
1215
1216 req.dev_id = dev_id;
1217 req.clk_id = clk_id;
1218
1219 ret = ti_sci_do_xfer(&xfer);
1220 if (ret) {
1221 ERROR("Transfer send failed (%d)\n", ret);
1222 return ret;
1223 }
1224
1225 *freq = resp.freq_hz;
1226
1227 return 0;
1228 }
1229
1230 /**
1231 * ti_sci_core_reboot() - Command to request system reset
1232 *
1233 * Return: 0 if all goes well, else appropriate error message
1234 */
ti_sci_core_reboot(void)1235 int ti_sci_core_reboot(void)
1236 {
1237 struct ti_sci_msg_req_reboot req;
1238 struct ti_sci_msg_hdr resp;
1239
1240 struct ti_sci_xfer xfer;
1241 int ret;
1242
1243 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET, 0,
1244 &req, sizeof(req),
1245 &resp, sizeof(resp),
1246 &xfer);
1247 if (ret) {
1248 ERROR("Message alloc failed (%d)\n", ret);
1249 return ret;
1250 }
1251 req.domain = TI_SCI_DOMAIN_FULL_SOC_RESET;
1252
1253 ret = ti_sci_do_xfer(&xfer);
1254 if (ret) {
1255 ERROR("Transfer send failed (%d)\n", ret);
1256 return ret;
1257 }
1258
1259 return 0;
1260 }
1261
1262 /**
1263 * ti_sci_proc_request() - Request a physical processor control
1264 *
1265 * @proc_id: Processor ID this request is for
1266 *
1267 * Return: 0 if all goes well, else appropriate error message
1268 */
ti_sci_proc_request(uint8_t proc_id)1269 int ti_sci_proc_request(uint8_t proc_id)
1270 {
1271 struct ti_sci_msg_req_proc_request req;
1272 struct ti_sci_msg_hdr resp;
1273
1274 struct ti_sci_xfer xfer;
1275 int ret;
1276
1277 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST, 0,
1278 &req, sizeof(req),
1279 &resp, sizeof(resp),
1280 &xfer);
1281 if (ret) {
1282 ERROR("Message alloc failed (%d)\n", ret);
1283 return ret;
1284 }
1285
1286 req.processor_id = proc_id;
1287
1288 ret = ti_sci_do_xfer(&xfer);
1289 if (ret) {
1290 ERROR("Transfer send failed (%d)\n", ret);
1291 return ret;
1292 }
1293
1294 return 0;
1295 }
1296
1297 /**
1298 * ti_sci_proc_release() - Release a physical processor control
1299 *
1300 * @proc_id: Processor ID this request is for
1301 *
1302 * Return: 0 if all goes well, else appropriate error message
1303 */
ti_sci_proc_release(uint8_t proc_id)1304 int ti_sci_proc_release(uint8_t proc_id)
1305 {
1306 struct ti_sci_msg_req_proc_release req;
1307 struct ti_sci_msg_hdr resp;
1308
1309 struct ti_sci_xfer xfer;
1310 int ret;
1311
1312 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE, 0,
1313 &req, sizeof(req),
1314 &resp, sizeof(resp),
1315 &xfer);
1316 if (ret) {
1317 ERROR("Message alloc failed (%d)\n", ret);
1318 return ret;
1319 }
1320
1321 req.processor_id = proc_id;
1322
1323 ret = ti_sci_do_xfer(&xfer);
1324 if (ret) {
1325 ERROR("Transfer send failed (%d)\n", ret);
1326 return ret;
1327 }
1328
1329 return 0;
1330 }
1331
1332 /**
1333 * ti_sci_proc_handover() - Handover a physical processor control to a host in
1334 * the processor's access control list.
1335 *
1336 * @proc_id: Processor ID this request is for
1337 * @host_id: Host ID to get the control of the processor
1338 *
1339 * Return: 0 if all goes well, else appropriate error message
1340 */
ti_sci_proc_handover(uint8_t proc_id,uint8_t host_id)1341 int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id)
1342 {
1343 struct ti_sci_msg_req_proc_handover req;
1344 struct ti_sci_msg_hdr resp;
1345
1346 struct ti_sci_xfer xfer;
1347 int ret;
1348
1349 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER, 0,
1350 &req, sizeof(req),
1351 &resp, sizeof(resp),
1352 &xfer);
1353 if (ret) {
1354 ERROR("Message alloc failed (%d)\n", ret);
1355 return ret;
1356 }
1357
1358 req.processor_id = proc_id;
1359 req.host_id = host_id;
1360
1361 ret = ti_sci_do_xfer(&xfer);
1362 if (ret) {
1363 ERROR("Transfer send failed (%d)\n", ret);
1364 return ret;
1365 }
1366
1367 return 0;
1368 }
1369
1370 /**
1371 * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags
1372 *
1373 * @proc_id: Processor ID this request is for
 * @bootvector: Boot vector address the processor starts execution from
1374 * @config_flags_set: Configuration flags to be set
1375 * @config_flags_clear: Configuration flags to be cleared
1376 *
1377 * Return: 0 if all goes well, else appropriate error message
1378 */
ti_sci_proc_set_boot_cfg(uint8_t proc_id,uint64_t bootvector,uint32_t config_flags_set,uint32_t config_flags_clear)1379 int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector,
1380 uint32_t config_flags_set,
1381 uint32_t config_flags_clear)
1382 {
1383 struct ti_sci_msg_req_set_proc_boot_config req;
1384 struct ti_sci_msg_hdr resp;
1385
1386 struct ti_sci_xfer xfer;
1387 int ret;
1388
1389 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG, 0,
1390 &req, sizeof(req),
1391 &resp, sizeof(resp),
1392 &xfer);
1393 if (ret) {
1394 ERROR("Message alloc failed (%d)\n", ret);
1395 return ret;
1396 }
1397
1398 req.processor_id = proc_id;
1399 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1400 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1401 TISCI_ADDR_HIGH_SHIFT;
1402 req.config_flags_set = config_flags_set;
1403 req.config_flags_clear = config_flags_clear;
1404
1405 ret = ti_sci_do_xfer(&xfer);
1406 if (ret) {
1407 ERROR("Transfer send failed (%d)\n", ret);
1408 return ret;
1409 }
1410
1411 return 0;
1412 }
1413
1414 /**
1415 * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags
1416 *
1417 * @proc_id: Processor ID this request is for
1418 * @control_flags_set: Control flags to be set
1419 * @control_flags_clear: Control flags to be cleared
1420 *
1421 * Return: 0 if all goes well, else appropriate error message
1422 */
ti_sci_proc_set_boot_ctrl(uint8_t proc_id,uint32_t control_flags_set,uint32_t control_flags_clear)1423 int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set,
1424 uint32_t control_flags_clear)
1425 {
1426 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1427 struct ti_sci_msg_hdr resp;
1428
1429 struct ti_sci_xfer xfer;
1430 int ret;
1431
1432 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1433 &req, sizeof(req),
1434 &resp, sizeof(resp),
1435 &xfer);
1436 if (ret) {
1437 ERROR("Message alloc failed (%d)\n", ret);
1438 return ret;
1439 }
1440
1441 req.processor_id = proc_id;
1442 req.control_flags_set = control_flags_set;
1443 req.control_flags_clear = control_flags_clear;
1444
1445 ret = ti_sci_do_xfer(&xfer);
1446 if (ret) {
1447 ERROR("Transfer send failed (%d)\n", ret);
1448 return ret;
1449 }
1450
1451 return 0;
1452 }
1453
1454 /**
1455 * ti_sci_proc_set_boot_ctrl_no_wait() - Set the processor boot control flags
1456 * without requesting or waiting for a
1457 * response.
1458 *
1459 * @proc_id: Processor ID this request is for
1460 * @control_flags_set: Control flags to be set
1461 * @control_flags_clear: Control flags to be cleared
1462 *
1463 * Return: 0 if all goes well, else appropriate error message
1464 */
ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,uint32_t control_flags_set,uint32_t control_flags_clear)1465 int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,
1466 uint32_t control_flags_set,
1467 uint32_t control_flags_clear)
1468 {
1469 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1470 struct ti_sci_xfer xfer;
1471 int ret;
1472
1473 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
1474 &req, sizeof(req),
1475 NULL, 0,
1476 &xfer);
1477 if (ret != 0U) {
1478 ERROR("Message alloc failed (%d)\n", ret);
1479 return ret;
1480 }
1481
1482 req.processor_id = proc_id;
1483 req.control_flags_set = control_flags_set;
1484 req.control_flags_clear = control_flags_clear;
1485
1486 ret = ti_sci_do_xfer(&xfer);
1487 if (ret != 0U) {
1488 ERROR("Transfer send failed (%d)\n", ret);
1489 return ret;
1490 }
1491
1492 return 0;
1493 }
1494
1495 /**
1496 * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the
1497 * processor configuration flags
1498 *
1499 * @proc_id: Processor ID this request is for
1500 * @cert_addr: Memory address at which payload image certificate is located
1501 *
1502 * Return: 0 if all goes well, else appropriate error message
1503 */
ti_sci_proc_auth_boot_image(uint8_t proc_id,uint64_t cert_addr)1504 int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr)
1505 {
1506 struct ti_sci_msg_req_proc_auth_boot_image req;
1507 struct ti_sci_msg_hdr resp;
1508
1509 struct ti_sci_xfer xfer;
1510 int ret;
1511
1512 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMAGE, 0,
1513 &req, sizeof(req),
1514 &resp, sizeof(resp),
1515 &xfer);
1516 if (ret) {
1517 ERROR("Message alloc failed (%d)\n", ret);
1518 return ret;
1519 }
1520
1521 req.processor_id = proc_id;
1522 req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
1523 req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
1524 TISCI_ADDR_HIGH_SHIFT;
1525
1526 ret = ti_sci_do_xfer(&xfer);
1527 if (ret) {
1528 ERROR("Transfer send failed (%d)\n", ret);
1529 return ret;
1530 }
1531
1532 return 0;
1533 }
1534
1535 /**
1536 * ti_sci_proc_get_boot_status() - Get the processor boot status
1537 *
1538 * @proc_id: Processor ID this request is for
1539 *
1540 * Return: 0 if all goes well, else appropriate error message
1541 */
ti_sci_proc_get_boot_status(uint8_t proc_id,uint64_t * bv,uint32_t * cfg_flags,uint32_t * ctrl_flags,uint32_t * sts_flags)1542 int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv,
1543 uint32_t *cfg_flags,
1544 uint32_t *ctrl_flags,
1545 uint32_t *sts_flags)
1546 {
1547 struct ti_sci_msg_req_get_proc_boot_status req;
1548 struct ti_sci_msg_resp_get_proc_boot_status resp;
1549
1550 struct ti_sci_xfer xfer;
1551 int ret;
1552
1553 ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS, 0,
1554 &req, sizeof(req),
1555 &resp, sizeof(resp),
1556 &xfer);
1557 if (ret) {
1558 ERROR("Message alloc failed (%d)\n", ret);
1559 return ret;
1560 }
1561
1562 req.processor_id = proc_id;
1563
1564 ret = ti_sci_do_xfer(&xfer);
1565 if (ret) {
1566 ERROR("Transfer send failed (%d)\n", ret);
1567 return ret;
1568 }
1569
1570 *bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
1571 (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
1572 TISCI_ADDR_HIGH_MASK);
1573 *cfg_flags = resp.config_flags;
1574 *ctrl_flags = resp.control_flags;
1575 *sts_flags = resp.status_flags;
1576
1577 return 0;
1578 }
1579
1580 /**
1581 * ti_sci_proc_wait_boot_status() - Wait for a processor boot status
1582 *
1583 * @proc_id: Processor ID this request is for
1584 * @num_wait_iterations Total number of iterations we will check before
1585 * we will timeout and give up
1586 * @num_match_iterations How many iterations should we have continued
1587 * status to account for status bits glitching.
1588 * This is to make sure that match occurs for
1589 * consecutive checks. This implies that the
1590 * worst case should consider that the stable
1591 * time should at the worst be num_wait_iterations *
1592 * num_match_iterations to prevent timeout.
1593 * @delay_per_iteration_us Specifies how long to wait (in micro seconds)
1594 * between each status checks. This is the minimum
1595 * duration, and overhead of register reads and
1596 * checks are on top of this and can vary based on
1597 * varied conditions.
1598 * @delay_before_iterations_us Specifies how long to wait (in micro seconds)
1599 * before the very first check in the first
1600 * iteration of status check loop. This is the
1601 * minimum duration, and overhead of register
1602 * reads and checks are.
1603 * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the
1604 * status matching this field requested MUST be 1.
1605 * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the
1606 * bits matching this field requested MUST be 1.
1607 * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the
1608 * status matching this field requested MUST be 0.
1609 * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the
1610 * bits matching this field requested MUST be 0.
1611 *
1612 * Return: 0 if all goes well, else appropriate error message
1613 */
ti_sci_proc_wait_boot_status(uint8_t proc_id,uint8_t num_wait_iterations,uint8_t num_match_iterations,uint8_t delay_per_iteration_us,uint8_t delay_before_iterations_us,uint32_t status_flags_1_set_all_wait,uint32_t status_flags_1_set_any_wait,uint32_t status_flags_1_clr_all_wait,uint32_t status_flags_1_clr_any_wait)1614 int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations,
1615 uint8_t num_match_iterations,
1616 uint8_t delay_per_iteration_us,
1617 uint8_t delay_before_iterations_us,
1618 uint32_t status_flags_1_set_all_wait,
1619 uint32_t status_flags_1_set_any_wait,
1620 uint32_t status_flags_1_clr_all_wait,
1621 uint32_t status_flags_1_clr_any_wait)
1622 {
1623 struct ti_sci_msg_req_wait_proc_boot_status req;
1624 struct ti_sci_msg_hdr resp;
1625
1626 struct ti_sci_xfer xfer;
1627 int ret;
1628
1629 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1630 &req, sizeof(req),
1631 &resp, sizeof(resp),
1632 &xfer);
1633 if (ret) {
1634 ERROR("Message alloc failed (%d)\n", ret);
1635 return ret;
1636 }
1637
1638 req.processor_id = proc_id;
1639 req.num_wait_iterations = num_wait_iterations;
1640 req.num_match_iterations = num_match_iterations;
1641 req.delay_per_iteration_us = delay_per_iteration_us;
1642 req.delay_before_iterations_us = delay_before_iterations_us;
1643 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1644 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1645 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1646 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1647
1648 ret = ti_sci_do_xfer(&xfer);
1649 if (ret) {
1650 ERROR("Transfer send failed (%d)\n", ret);
1651 return ret;
1652 }
1653
1654 return 0;
1655 }
1656
1657 /**
1658 * ti_sci_proc_wait_boot_status_no_wait() - Wait for a processor boot status
1659 * without requesting or waiting for
1660 * a response.
1661 *
1662 * @proc_id: Processor ID this request is for
1663 * @num_wait_iterations Total number of iterations we will check before
1664 * we will timeout and give up
1665 * @num_match_iterations How many iterations should we have continued
1666 * status to account for status bits glitching.
1667 * This is to make sure that match occurs for
1668 * consecutive checks. This implies that the
1669 * worst case should consider that the stable
1670 * time should at the worst be num_wait_iterations *
1671 * num_match_iterations to prevent timeout.
1672 * @delay_per_iteration_us Specifies how long to wait (in micro seconds)
1673 * between each status checks. This is the minimum
1674 * duration, and overhead of register reads and
1675 * checks are on top of this and can vary based on
1676 * varied conditions.
1677 * @delay_before_iterations_us Specifies how long to wait (in micro seconds)
1678 * before the very first check in the first
1679 * iteration of status check loop. This is the
1680 * minimum duration, and overhead of register
1681 * reads and checks are.
1682 * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the
1683 * status matching this field requested MUST be 1.
1684 * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the
1685 * bits matching this field requested MUST be 1.
1686 * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the
1687 * status matching this field requested MUST be 0.
1688 * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the
1689 * bits matching this field requested MUST be 0.
1690 *
1691 * Return: 0 if all goes well, else appropriate error message
1692 */
ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,uint8_t num_wait_iterations,uint8_t num_match_iterations,uint8_t delay_per_iteration_us,uint8_t delay_before_iterations_us,uint32_t status_flags_1_set_all_wait,uint32_t status_flags_1_set_any_wait,uint32_t status_flags_1_clr_all_wait,uint32_t status_flags_1_clr_any_wait)1693 int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,
1694 uint8_t num_wait_iterations,
1695 uint8_t num_match_iterations,
1696 uint8_t delay_per_iteration_us,
1697 uint8_t delay_before_iterations_us,
1698 uint32_t status_flags_1_set_all_wait,
1699 uint32_t status_flags_1_set_any_wait,
1700 uint32_t status_flags_1_clr_all_wait,
1701 uint32_t status_flags_1_clr_any_wait)
1702 {
1703 struct ti_sci_msg_req_wait_proc_boot_status req;
1704 struct ti_sci_xfer xfer;
1705 int ret;
1706
1707 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
1708 &req, sizeof(req),
1709 NULL, 0,
1710 &xfer);
1711 if (ret != 0U) {
1712 ERROR("Message alloc failed (%d)\n", ret);
1713 return ret;
1714 }
1715
1716 req.processor_id = proc_id;
1717 req.num_wait_iterations = num_wait_iterations;
1718 req.num_match_iterations = num_match_iterations;
1719 req.delay_per_iteration_us = delay_per_iteration_us;
1720 req.delay_before_iterations_us = delay_before_iterations_us;
1721 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1722 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1723 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1724 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1725
1726 ret = ti_sci_do_xfer(&xfer);
1727 if (ret != 0U) {
1728 ERROR("Transfer send failed (%d)\n", ret);
1729 return ret;
1730 }
1731
1732 return 0;
1733 }
1734
1735 /**
1736 * ti_sci_enter_sleep - Command to initiate system transition into suspend.
1737 *
1738 * @proc_id: Processor ID.
1739 * @mode: Low power mode to enter.
1740 * @core_resume_addr: Address that core should be
1741 * resumed from after low power transition.
1742 *
1743 * Return: 0 if all goes well, else appropriate error message
1744 */
ti_sci_enter_sleep(uint8_t proc_id,uint8_t mode,uint64_t core_resume_addr)1745 int ti_sci_enter_sleep(uint8_t proc_id,
1746 uint8_t mode,
1747 uint64_t core_resume_addr)
1748 {
1749 struct ti_sci_msg_req_enter_sleep req;
1750 struct ti_sci_xfer xfer;
1751 int ret;
1752
1753 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_ENTER_SLEEP, 0,
1754 &req, sizeof(req),
1755 NULL, 0,
1756 &xfer);
1757 if (ret != 0U) {
1758 ERROR("Message alloc failed (%d)\n", ret);
1759 return ret;
1760 }
1761
1762 req.processor_id = proc_id;
1763 req.mode = mode;
1764 req.core_resume_lo = core_resume_addr & TISCI_ADDR_LOW_MASK;
1765 req.core_resume_hi = (core_resume_addr & TISCI_ADDR_HIGH_MASK) >>
1766 TISCI_ADDR_HIGH_SHIFT;
1767
1768 ret = ti_sci_do_xfer(&xfer);
1769 if (ret != 0U) {
1770 ERROR("Transfer send failed (%d)\n", ret);
1771 return ret;
1772 }
1773
1774 return 0;
1775 }
1776
1777 /**
1778 * ti_sci_lpm_get_next_sys_mode() - Get next LPM system mode
1779 *
1780 * @next_mode: pointer to a variable that will store the next mode
1781 *
1782 * Return: 0 if all goes well, else appropriate error message
1783 */
ti_sci_lpm_get_next_sys_mode(uint8_t * next_mode)1784 int ti_sci_lpm_get_next_sys_mode(uint8_t *next_mode)
1785 {
1786 struct ti_sci_msg_req_lpm_get_next_sys_mode req;
1787 struct ti_sci_msg_resp_lpm_get_next_sys_mode resp;
1788 struct ti_sci_xfer xfer;
1789 int ret;
1790
1791 if (next_mode == NULL) {
1792 return -EINVAL;
1793 }
1794
1795 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_LPM_GET_NEXT_SYS_MODE, 0,
1796 &req, sizeof(req),
1797 &resp, sizeof(resp),
1798 &xfer);
1799 if (ret != 0) {
1800 ERROR("Message alloc failed (%d)\n", ret);
1801 return ret;
1802 }
1803
1804 ret = ti_sci_do_xfer(&xfer);
1805 if (ret != 0) {
1806 ERROR("Transfer send failed (%d)\n", ret);
1807 return ret;
1808 }
1809
1810 *next_mode = resp.mode;
1811
1812 return 0;
1813 }
1814
ti_sci_boot_notification(void)1815 int ti_sci_boot_notification(void)
1816 {
1817 struct tisci_msg_boot_notification_msg msg;
1818 struct ti_sci_msg rx_msg;
1819 int ret;
1820
1821 rx_msg.buf = (uint8_t *)&msg;
1822 rx_msg.len = sizeof(msg);
1823
1824 /* Receive the message */
1825 ret = ti_sci_transport_recv(RX_SECURE_TRANSPORT_CHANNEL_ID, &rx_msg);
1826 if (ret) {
1827 ERROR("Message receive failed (%d)\n", ret);
1828 return ret;
1829 }
1830
1831 /* Check for proper message ID */
1832 if (msg.hdr.type != TI_SCI_MSG_BOOT_NOTIFICATION) {
1833 ERROR("%s: Command expected 0x%x, but received 0x%x\n",
1834 __func__, TI_SCI_MSG_BOOT_NOTIFICATION,
1835 msg.hdr.type);
1836 return -EINVAL;
1837 }
1838 VERBOSE("%s: boot notification received from TIFS\n", __func__);
1839
1840 return 0;
1841 }
1842
1843 /*
1844 * ti_sci_encrypt_tfa - Ask TIFS to encrypt TFA at a specific address
1845 *
1846 * @src_tfa_addr: Address where the TFA lies unencrypted
1847 * @src_tfa_len: Size of the TFA unencrypted
1848 *
1849 * Return: 0 if all goes well, else appropriate error message
1850 */
ti_sci_encrypt_tfa(uint64_t src_tfa_addr,size_t src_tfa_len)1851 int ti_sci_encrypt_tfa(uint64_t src_tfa_addr, size_t src_tfa_len)
1852 {
1853 struct ti_sci_msg_req_encrypt_tfa req;
1854 struct ti_sci_msg_resp_encrypt_tfa resp;
1855 struct ti_sci_xfer xfer;
1856 int ret;
1857
1858 ret = ti_sci_setup_one_xfer(TISCI_MSG_LPM_ENCRYPT_TFA, 0,
1859 &req, sizeof(req),
1860 &resp, sizeof(resp),
1861 &xfer);
1862 if (ret) {
1863 ERROR("Message alloc failed (%d)\n", ret);
1864 return ret;
1865 }
1866
1867 req.src_tfa_addr = src_tfa_addr;
1868 req.src_tfa_len = src_tfa_len;
1869
1870 ret = ti_sci_do_xfer(&xfer);
1871 if (ret) {
1872 ERROR("Transfer send failed (%d)\n", ret);
1873 return ret;
1874 }
1875
1876 return 0;
1877 }
1878
1879 /**
1880 * ti_sci_set_fwl_region - Request for configuring a firewall region
1881 *
1882 * @fwl_id: Firewall ID in question. fwl_id is defined in the TRM.
1883 * @region: Region or channel number to set config info. This field
1884 * is unused in case of a simple firewall and must be
1885 * initialized to zero. In case of a region based
1886 * firewall, this field indicates the region in question
1887 * (index starting from 0). In case of a channel based
1888 * firewall, this field indicates the channel in question
1889 * (index starting from 0).
1890 * @n_permission_regs: Number of permission registers to set
1891 * @control: Contents of the firewall CONTROL register to set
1892 * @permissions: Contents of the firewall PERMISSION register to set
1893 * @start_address: Contents of the firewall START_ADDRESS register to set
1894 * @end_address: Contents of the firewall END_ADDRESS register to set
1895 *
1896 * Return: 0 if all goes well, else appropriate error message
1897 */
ti_sci_set_fwl_region(uint16_t fwl_id,uint16_t region,uint32_t n_permission_regs,uint32_t control,const uint32_t * permissions,uint64_t start_address,uint64_t end_address)1898 int ti_sci_set_fwl_region(uint16_t fwl_id, uint16_t region,
1899 uint32_t n_permission_regs, uint32_t control,
1900 const uint32_t *permissions,
1901 uint64_t start_address, uint64_t end_address)
1902 {
1903 struct ti_sci_msg_req_fwl_set_firewall_region req = { };
1904 struct ti_sci_msg_resp_fwl_set_firewall_region resp = { };
1905 struct ti_sci_xfer xfer = { };
1906 unsigned int i;
1907 int ret;
1908
1909 assert(n_permission_regs <= FWL_MAX_PRIVID_SLOTS);
1910
1911 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_FWL_SET, 0,
1912 &req, sizeof(req),
1913 &resp, sizeof(resp),
1914 &xfer);
1915 if (ret != 0U) {
1916 ERROR("Message alloc failed (%d)\n", ret);
1917 return ret;
1918 }
1919
1920 req.fwl_id = fwl_id;
1921 req.region = region;
1922 req.n_permission_regs = n_permission_regs;
1923 req.control = control;
1924 for (i = 0; i < n_permission_regs; i++)
1925 req.permissions[i] = permissions[i];
1926 req.start_address = start_address;
1927 req.end_address = end_address;
1928
1929 ret = ti_sci_do_xfer(&xfer);
1930 if (ret != 0U) {
1931 ERROR("Transfer send failed (%d)\n", ret);
1932 return ret;
1933 }
1934
1935 return 0;
1936 }
1937
1938 /**
1939 * ti_sci_get_fwl_region - Request for getting a firewall region
1940 *
1941 * @fwl_id: Firewall ID in question. fwl_id is defined in the TRM.
1942 * @region: Region or channel number to set config info. This field
1943 * is unused in case of a simple firewall and must be
1944 * initialized to zero. In case of a region based
1945 * firewall, this field indicates the region in question
1946 * (index starting from 0). In case of a channel based
1947 * firewall, this field indicates the channel in question
1948 * (index starting from 0).
1949 * @n_permission_regs: Number of permission registers to retrieve
1950 * @control: Contents of the firewall CONTROL register
1951 * @permissions: Contents of the firewall PERMISSION register
1952 * @start_address: Contents of the firewall START_ADDRESS register
1953 * @end_address: Contents of the firewall END_ADDRESS register
1954 *
1955 * Return: 0 if all goes well, else appropriate error message
1956 */
ti_sci_get_fwl_region(uint16_t fwl_id,uint16_t region,uint32_t n_permission_regs,uint32_t * control,uint32_t * permissions,uint64_t * start_address,uint64_t * end_address)1957 int ti_sci_get_fwl_region(uint16_t fwl_id, uint16_t region,
1958 uint32_t n_permission_regs, uint32_t *control,
1959 uint32_t *permissions,
1960 uint64_t *start_address, uint64_t *end_address)
1961 {
1962 struct ti_sci_msg_req_fwl_get_firewall_region req = { };
1963 struct ti_sci_msg_resp_fwl_get_firewall_region resp = { };
1964 struct ti_sci_xfer xfer = { };
1965 unsigned int i;
1966 int ret;
1967
1968 assert(n_permission_regs <= FWL_MAX_PRIVID_SLOTS);
1969
1970 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_FWL_GET, 0,
1971 &req, sizeof(req),
1972 &resp, sizeof(resp),
1973 &xfer);
1974 if (ret != 0U) {
1975 ERROR("Message alloc failed (%d)\n", ret);
1976 return ret;
1977 }
1978
1979 req.fwl_id = fwl_id;
1980 req.region = region;
1981 req.n_permission_regs = n_permission_regs;
1982
1983 ret = ti_sci_do_xfer(&xfer);
1984 if (ret != 0U) {
1985 ERROR("Transfer send failed (%d)\n", ret);
1986 return ret;
1987 }
1988
1989 *control = resp.control;
1990 for (i = 0; i < n_permission_regs; i++)
1991 permissions[i] = resp.permissions[i];
1992 *start_address = resp.start_address;
1993 *end_address = resp.end_address;
1994
1995 return 0;
1996 }
1997
1998 /**
1999 * ti_sci_change_fwl_owner() - Request for changing a firewall owner
2000 *
2001 * @fwl_id: Firewall ID in question. fwl_id is defined in the TRM.
2002 * @region: Region or channel number to set config info. This field
2003 * is unused in case of a simple firewall and must be
2004 * initialized to zero. In case of a region based
2005 * firewall, this field indicates the region in question
2006 * (index starting from 0). In case of a channel based
2007 * firewall, this field indicates the channel in question
2008 * (index starting from 0).
2009 * @owner_index: New owner index to transfer ownership to
2010 * @owner_privid: New owner priv-ID returned by DMSC/TIFS. This field is
2011 * currently initialized to zero by DMSC/TIFS.
2012 * @owner_permission_bits: New owner permission bits returned by DMSC/TIFS. This
2013 * field is currently initialized to zero by DMSC/TIFS.
2014 *
2015 * Return: 0 if all goes well, else appropriate error message
2016 */
ti_sci_change_fwl_owner(uint16_t fwl_id,uint16_t region,uint8_t owner_index,uint8_t * owner_privid,uint16_t * owner_permission_bits)2017 int ti_sci_change_fwl_owner(uint16_t fwl_id, uint16_t region,
2018 uint8_t owner_index, uint8_t *owner_privid,
2019 uint16_t *owner_permission_bits)
2020 {
2021 struct ti_sci_msg_req_fwl_change_owner_info req = { };
2022 struct ti_sci_msg_resp_fwl_change_owner_info resp = { };
2023 struct ti_sci_xfer xfer = { };
2024 int ret;
2025
2026 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_FWL_CHANGE_OWNER, 0,
2027 &req, sizeof(req),
2028 &resp, sizeof(resp),
2029 &xfer);
2030 if (ret != 0U) {
2031 ERROR("Message alloc failed (%d)\n", ret);
2032 return ret;
2033 }
2034
2035 req.fwl_id = fwl_id;
2036 req.region = region;
2037 req.owner_index = owner_index;
2038
2039 ret = ti_sci_do_xfer(&xfer);
2040 if (ret != 0U) {
2041 ERROR("Transfer send failed (%d)\n", ret);
2042 return ret;
2043 }
2044
2045 *owner_privid = resp.owner_privid;
2046 *owner_permission_bits = resp.owner_permission_bits;
2047
2048 return 0;
2049 }
2050