/*
 * Texas Instruments System Control Interface Driver
 * Based on Linux and U-Boot implementation
 *
 * Copyright (C) 2018-2025 Texas Instruments Incorporated - https://www.ti.com/
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#include <platform_def.h>
#include <lib/bakery_lock.h>

#include <common/debug.h>
#include <ti_sci_transport.h>

#include "ti_sci_protocol.h"
#include "ti_sci.h"

#if USE_COHERENT_MEM
__section(".tzfw_coherent_mem")
#endif
static uint8_t message_sequence;

DEFINE_BAKERY_LOCK(ti_sci_xfer_lock);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_message:	Receive message
 */
struct ti_sci_xfer {
	struct ti_sci_msg tx_message;
	struct ti_sci_msg rx_message;
};

/**
 * ti_sci_setup_one_xfer() - Setup one message type
 *
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @tx_buf:	Buffer to be sent to mailbox channel
 * @tx_message_size: transmit message size
 * @rx_buf:	Buffer to be received from mailbox channel
 * @rx_message_size: receive message size
 * @xfer:	Transfer message to set up
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags,
				 void *tx_buf,
				 size_t tx_message_size,
				 void *rx_buf,
				 size_t rx_message_size,
				 struct ti_sci_xfer *xfer)
{
	struct ti_sci_msg_hdr *hdr;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
	    tx_message_size > TI_SCI_MAX_MESSAGE_SIZE ||
	    tx_message_size < sizeof(*hdr))
		return -ERANGE;

	hdr = (struct ti_sci_msg_hdr *)tx_buf;

	/* TODO: Calculate checksum */
	hdr->sec_hdr.checksum = 0;
	hdr->seq = ++message_sequence;
	hdr->type = msg_type;
	hdr->host = TI_SCI_HOST_ID;
	hdr->flags = msg_flags;
	/* Request a response if rx_message_size is non-zero */
	if (rx_message_size != 0U) {
		hdr->flags |= TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
	}

	xfer->tx_message.buf = tx_buf;
	xfer->tx_message.len = tx_message_size;

	xfer->rx_message.buf = rx_buf;
	xfer->rx_message.len = rx_message_size;

	return 0;
}

/**
 * ti_sci_get_response() - Receive response from mailbox channel
 *
 * @msg:	Message to populate with the received response
 * @chan:	Channel to receive the response
 *
 * Return: 0 if all goes well, else appropriate error message
 */
static int ti_sci_get_response(struct ti_sci_msg *msg,
			       enum ti_sci_transport_chan_id chan)
{
	struct ti_sci_msg_hdr *hdr;
	unsigned int retry = 5;
	int ret;

	for (; retry > 0; retry--) {
		/* Receive the response */
		ret = ti_sci_transport_recv(chan, msg);
		if (ret) {
			ERROR("Message receive failed (%d)\n", ret);
			return ret;
		}

		/* msg is updated by Secure Proxy driver */
		hdr = (struct ti_sci_msg_hdr *)msg->buf;

		/* Sanity check for message response */
		if (hdr->seq == message_sequence)
			break;
		else
			WARN("Message with sequence ID %u is not expected\n", hdr->seq);
	}
	if (!retry) {
		ERROR("Timed out waiting for message\n");
		return -EINVAL;
	}

	if (msg->len > TI_SCI_MAX_MESSAGE_SIZE) {
		ERROR("Unable to handle %lu xfer (max %d)\n",
		      msg->len, TI_SCI_MAX_MESSAGE_SIZE);
		return -EINVAL;
	}

	if (!(hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
		return -ENODEV;

	/* TODO: Verify checksum */
	(void)hdr->sec_hdr.checksum;

	return 0;
}

/**
 * ti_sci_do_xfer() - Do one transfer
 *
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all goes well, else appropriate error message
 */
static int ti_sci_do_xfer(struct ti_sci_xfer *xfer)
{
	struct ti_sci_msg *tx_msg = &xfer->tx_message;
	struct ti_sci_msg *rx_msg = &xfer->rx_message;
	int ret;

	bakery_lock_get(&ti_sci_xfer_lock);

	/* Clear any spurious messages in receive queue */
	ret = ti_sci_transport_clear_rx_thread(RX_SECURE_TRANSPORT_CHANNEL_ID);
	if (ret) {
		ERROR("Could not clear response queue (%d)\n", ret);
		goto unlock;
	}

	/* Send the message */
	ret = ti_sci_transport_send(TX_SECURE_TRANSPORT_CHANNEL_ID, tx_msg);
	if (ret) {
		ERROR("Message sending failed (%d)\n", ret);
		goto unlock;
	}

	/* Get the response if requested */
	if (rx_msg->len != 0U) {
		ret = ti_sci_get_response(rx_msg, RX_SECURE_TRANSPORT_CHANNEL_ID);
		if (ret != 0U) {
			ERROR("Failed to get response (%d)\n", ret);
			goto unlock;
		}
	}

unlock:
	bakery_lock_release(&ti_sci_xfer_lock);

	return ret;
}

/**
 * ti_sci_get_revision() - Get the revision of the SCI entity
 *
 * Updates the SCI information in the internal data structure.
 *
 * @version: Structure containing the version info
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_get_revision(struct ti_sci_msg_version *version)
{
	struct ti_sci_msg_resp_version rev_info;
	struct ti_sci_msg_hdr hdr;
	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0,
				    &hdr, sizeof(hdr),
				    &rev_info, sizeof(rev_info),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	memcpy(version->firmware_description, rev_info.firmware_description,
	       sizeof(rev_info.firmware_description));
	version->abi_major = rev_info.abi_major;
	version->abi_minor = rev_info.abi_minor;
	version->firmware_revision = rev_info.firmware_revision;
	version->sub_version = rev_info.sub_version;
	version->patch_version = rev_info.patch_version;

	return 0;
}

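/*
 * Example (illustrative only, not part of the driver): a platform setup
 * routine might call ti_sci_get_revision() once during boot to log the
 * system firmware version it is talking to, e.g.:
 *
 *	struct ti_sci_msg_version version;
 *
 *	if (ti_sci_get_revision(&version) == 0) {
 *		INFO("SYSFW ABI %d.%d (firmware rev 0x%04x '%s')\n",
 *		     version.abi_major, version.abi_minor,
 *		     version.firmware_revision,
 *		     version.firmware_description);
 *	}
 */
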
/**
 * ti_sci_query_fw_caps() - Get the FW/SoC capabilities
 *
 * @fw_caps:	Each bit in fw_caps indicating one FW/SOC capability
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
int ti_sci_query_fw_caps(uint64_t *fw_caps)
{
	struct ti_sci_msg_hdr req;
	struct ti_sci_msg_resp_query_fw_caps resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_FW_CAPS, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret != 0U) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	ret = ti_sci_do_xfer(&xfer);
	if (ret != 0U) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	if (fw_caps)
		*fw_caps = resp.fw_caps;

	return 0;
}

/**
 * ti_sci_device_set_state() - Set device state
 *
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all goes well, else appropriate error message
 */
static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, flags,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_device_get_state() - Get device state
 *
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all goes well, else appropriate error message
 */
static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt,
				   uint32_t *resets, uint8_t *p_state,
				   uint8_t *c_state)
{
	struct ti_sci_msg_req_get_device_state req;
	struct ti_sci_msg_resp_get_device_state resp;

	struct ti_sci_xfer xfer;
	int ret;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.id = id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	if (clcnt)
		*clcnt = resp.context_loss_count;
	if (resets)
		*resets = resp.resets;
	if (p_state)
		*p_state = resp.programmed_state;
	if (c_state)
		*c_state = resp.current_state;

	return 0;
}

/**
 * ti_sci_device_get() - Request for device managed by TISCI
 *
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get(uint32_t id)
{
	return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI
 *
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * NOTE: This _exclusive version of the get API is for exclusive access to the
 * device. Any other host in the system will fail to get this device after this
 * call until exclusive access is released with device_put or a non-exclusive
 * set call.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_exclusive(uint32_t id)
{
	return ti_sci_device_set_state(id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_device_idle() - Idle a device managed by TISCI
 *
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_idle(uint32_t id)
{
	return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI
 *
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * NOTE: This _exclusive version of the idle API is for exclusive access to
 * the device. Any other host in the system will fail to get this device after
 * this call until exclusive access is released with device_put or a
 * non-exclusive set call.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_idle_exclusive(uint32_t id)
{
	return ti_sci_device_set_state(id,
				       MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_device_put() - Release a device managed by TISCI
 *
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_put(uint32_t id)
{
	return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
}

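/*
 * Example (illustrative only): callers are expected to balance get and put
 * themselves, e.g. around a one-off access to a peripheral. The device ID
 * below is a hypothetical placeholder:
 *
 *	ret = ti_sci_device_get(EXAMPLE_DEV_ID);
 *	if (ret)
 *		return ret;
 *	... access the peripheral ...
 *	ret = ti_sci_device_put(EXAMPLE_DEV_ID);
 */
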
/**
 * ti_sci_device_put_no_wait() - Release a device without requesting or waiting
 *				 for a response.
 *
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_put_no_wait(uint32_t id)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE, 0,
				    &req, sizeof(req),
				    NULL, 0,
				    &xfer);
	if (ret != 0U) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.id = id;
	req.state = MSG_DEVICE_SW_STATE_AUTO_OFF;

	ret = ti_sci_do_xfer(&xfer);
	if (ret != 0U) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_device_is_valid() - Is the device valid
 *
 * @id:		Device Identifier
 *
 * Return: 0 if all goes well and the device ID is valid, else return
 *         appropriate error
 */
int ti_sci_device_is_valid(uint32_t id)
{
	uint8_t unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_device_get_state(id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_device_get_clcnt() - Get context loss counter
 *
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count)
{
	return ti_sci_device_get_state(id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_device_is_idle() - Check if the device is requested to be idle
 *
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_is_idle(uint32_t id, bool *r_state)
{
	int ret;
	uint8_t state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_device_is_stop() - Check if the device is requested to be stopped
 *
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state)
{
	int ret;
	uint8_t p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_device_is_on() - Check if the device is requested to be ON
 *
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state)
{
	int ret;
	uint8_t p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_device_is_trans() - Check if the device is currently transitioning
 *
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_is_trans(uint32_t id, bool *curr_state)
{
	int ret;
	uint8_t state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_device_set_resets() - Set resets for device managed by TISCI
 *
 * @id:			Device Identifier
 * @reset_state:	Device specific reset bit field
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state)
{
	struct ti_sci_msg_req_set_device_resets req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.id = id;
	req.resets = reset_state;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_device_get_resets() - Get reset state for device managed by TISCI
 *
 * @id:			Device Identifier
 * @reset_state:	Pointer to reset state to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state)
{
	return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL);
}

/**
 * ti_sci_clock_set_state() - Set clock state helper
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id,
			   uint32_t flags, uint8_t state)
{
	struct ti_sci_msg_req_set_clock_state req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE, flags,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.request_state = state;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_clock_get_state() - Get clock state helper
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state: State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id,
			   uint8_t *programmed_state,
			   uint8_t *current_state)
{
	struct ti_sci_msg_req_get_clock_state req;
	struct ti_sci_msg_resp_get_clock_state resp;

	struct ti_sci_xfer xfer;
	int ret;

	if (!programmed_state && !current_state)
		return -EINVAL;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	if (programmed_state)
		*programmed_state = resp.programmed_state;
	if (current_state)
		*current_state = resp.current_state;

	return 0;
}

/**
 * ti_sci_clock_get() - Get control of a clock from TI SCI
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' iff Spread Spectrum clock is desired
 * @can_change_freq: 'true' iff frequency change is desired
 * @enable_input_term: 'true' iff input termination is desired
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id,
		     bool needs_ssc, bool can_change_freq,
		     bool enable_input_term)
{
	uint32_t flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_clock_set_state(dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_clock_idle() - Idle a clock which is in our control
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id)
{
	return ti_sci_clock_set_state(dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_clock_put() - Release a clock from our control
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id)
{
	return ti_sci_clock_set_state(dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_clock_is_auto() - Is the clock being auto managed
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state:	state indicating if the clock is auto managed
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state)
{
	uint8_t state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);

	return 0;
}

/**
 * ti_sci_clock_is_on() - Is the clock ON
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state:	state indicating if the clock is managed by us and enabled
 * @curr_state:	state indicating if the clock is ready for operation
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id,
		       bool *req_state, bool *curr_state)
{
	uint8_t c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);

	return 0;
}

/**
 * ti_sci_clock_is_off() - Is the clock OFF
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state:	state indicating if the clock is managed by us and disabled
 * @curr_state:	state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id,
			bool *req_state, bool *curr_state)
{
	uint8_t c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);

	return 0;
}

/**
 * ti_sci_clock_set_parent() - Set the clock source of a specific device clock
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id)
{
	struct ti_sci_msg_req_set_clock_parent req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.parent_id = parent_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_clock_get_parent() - Get current parent clock source
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id)
{
	struct ti_sci_msg_req_get_clock_parent req;
	struct ti_sci_msg_resp_get_clock_parent resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	*parent_id = resp.parent_id;

	return 0;
}

/**
 * ti_sci_clock_get_num_parents() - Get num parents of the current clk source
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents to the current clock.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id,
				 uint8_t *num_parents)
{
	struct ti_sci_msg_req_get_clock_num_parents req;
	struct ti_sci_msg_resp_get_clock_num_parents resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	*num_parents = resp.num_parents;

	return 0;
}

/**
 * ti_sci_clock_get_match_freq() - Find a good match for frequency
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id,
				uint64_t min_freq, uint64_t target_freq,
				uint64_t max_freq, uint64_t *match_freq)
{
	struct ti_sci_msg_req_query_clock_freq req;
	struct ti_sci_msg_resp_query_clock_freq resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	*match_freq = resp.freq_hz;

	return 0;
}

/**
 * ti_sci_clock_set_freq() - Set a frequency for clock
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq,
			  uint64_t target_freq, uint64_t max_freq)
{
	struct ti_sci_msg_req_set_clock_freq req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

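/*
 * Example (illustrative only): a typical sequence is to query an achievable
 * frequency with ti_sci_clock_get_match_freq() and then program it with
 * ti_sci_clock_set_freq() using the same min/target/max window. The device
 * and clock IDs below are hypothetical placeholders:
 *
 *	uint64_t match;
 *
 *	ret = ti_sci_clock_get_match_freq(EXAMPLE_DEV_ID, EXAMPLE_CLK_ID,
 *					  min_hz, target_hz, max_hz, &match);
 *	if (!ret)
 *		ret = ti_sci_clock_set_freq(EXAMPLE_DEV_ID, EXAMPLE_CLK_ID,
 *					    min_hz, match, max_hz);
 */
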
/**
 * ti_sci_clock_get_freq() - Get current frequency
 *
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq)
{
	struct ti_sci_msg_req_get_clock_freq req;
	struct ti_sci_msg_resp_get_clock_freq resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	*freq = resp.freq_hz;

	return 0;
}

/**
 * ti_sci_core_reboot() - Command to request system reset
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_core_reboot(void)
{
	struct ti_sci_msg_req_reboot req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}
	req.domain = TI_SCI_DOMAIN_FULL_SOC_RESET;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

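/*
 * Example (illustrative only): a platform PSCI system_reset hook would
 * typically issue this request and then wait for the reset to take effect.
 * The handler name below is a hypothetical sketch, not part of this driver:
 *
 *	static void __dead2 example_system_reset(void)
 *	{
 *		if (ti_sci_core_reboot() != 0)
 *			ERROR("ti_sci_core_reboot failed\n");
 *		while (1)
 *			wfi();
 *	}
 */
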
/**
 * ti_sci_proc_request() - Request a physical processor control
 *
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_request(uint8_t proc_id)
{
	struct ti_sci_msg_req_proc_request req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_release() - Release a physical processor control
 *
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_release(uint8_t proc_id)
{
	struct ti_sci_msg_req_proc_release req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_handover() - Handover a physical processor control to a host in
 *			    the processor's access control list.
 *
 * @proc_id:	Processor ID this request is for
 * @host_id:	Host ID to get the control of the processor
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id)
{
	struct ti_sci_msg_req_proc_handover req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.host_id = host_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags
 *
 * @proc_id:	Processor ID this request is for
 * @bootvector:	Processor boot vector (entry point address)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector,
			     uint32_t config_flags_set,
			     uint32_t config_flags_clear)
{
	struct ti_sci_msg_req_set_proc_boot_config req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
	req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
				TISCI_ADDR_HIGH_SHIFT;
	req.config_flags_set = config_flags_set;
	req.config_flags_clear = config_flags_clear;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags
 *
 * @proc_id:		Processor ID this request is for
 * @control_flags_set:	Control flags to be set
 * @control_flags_clear: Control flags to be cleared
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set,
			      uint32_t control_flags_clear)
{
	struct ti_sci_msg_req_set_proc_boot_ctrl req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.control_flags_set = control_flags_set;
	req.control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_set_boot_ctrl_no_wait() - Set the processor boot control flags
 *					 without requesting or waiting for a
 *					 response.
 *
 * @proc_id:		Processor ID this request is for
 * @control_flags_set:	Control flags to be set
 * @control_flags_clear: Control flags to be cleared
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_set_boot_ctrl_no_wait(uint8_t proc_id,
				      uint32_t control_flags_set,
				      uint32_t control_flags_clear)
{
	struct ti_sci_msg_req_set_proc_boot_ctrl req;
	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL, 0,
				    &req, sizeof(req),
				    NULL, 0,
				    &xfer);
	if (ret != 0U) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.control_flags_set = control_flags_set;
	req.control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(&xfer);
	if (ret != 0U) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the
 *				   processor configuration flags
 *
 * @proc_id:	Processor ID this request is for
 * @cert_addr:	Memory address at which payload image certificate is located
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr)
{
	struct ti_sci_msg_req_proc_auth_boot_image req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMAGE, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
	req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
				TISCI_ADDR_HIGH_SHIFT;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_get_boot_status() - Get the processor boot status
 *
 * @proc_id:	Processor ID this request is for
 * @bv:		Pointer to populate with the processor boot vector
 * @cfg_flags:	Pointer to populate with the processor configuration flags
 * @ctrl_flags:	Pointer to populate with the processor control flags
 * @sts_flags:	Pointer to populate with the processor status flags
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv,
				uint32_t *cfg_flags,
				uint32_t *ctrl_flags,
				uint32_t *sts_flags)
{
	struct ti_sci_msg_req_get_proc_boot_status req;
	struct ti_sci_msg_resp_get_proc_boot_status resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	*bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
		(((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
		 TISCI_ADDR_HIGH_MASK);
	*cfg_flags = resp.config_flags;
	*ctrl_flags = resp.control_flags;
	*sts_flags = resp.status_flags;

	return 0;
}

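/*
 * Example (illustrative only): bringing up an auxiliary core usually chains
 * several of the processor calls above - request control of the core, program
 * its boot vector, then release it. The IDs and vector below are hypothetical
 * placeholders:
 *
 *	ret = ti_sci_proc_request(EXAMPLE_PROC_ID);
 *	if (!ret)
 *		ret = ti_sci_proc_set_boot_cfg(EXAMPLE_PROC_ID,
 *					       EXAMPLE_BOOT_VECTOR, 0, 0);
 *	if (!ret)
 *		ret = ti_sci_proc_release(EXAMPLE_PROC_ID);
 */
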
/**
 * ti_sci_proc_wait_boot_status() - Wait for a processor boot status
 *
 * @proc_id:			Processor ID this request is for
 * @num_wait_iterations:	Total number of iterations we will check before
 *				we will timeout and give up
 * @num_match_iterations:	How many iterations should we have continued
 *				status to account for status bits glitching.
 *				This is to make sure that match occurs for
 *				consecutive checks. This implies that the
 *				worst case should consider that the stable
 *				time should at the worst be num_wait_iterations
 *				* num_match_iterations to prevent timeout.
 * @delay_per_iteration_us:	Specifies how long to wait (in micro seconds)
 *				between each status check. This is the minimum
 *				duration, and overhead of register reads and
 *				checks are on top of this and can vary based on
 *				varied conditions.
 * @delay_before_iterations_us:	Specifies how long to wait (in micro seconds)
 *				before the very first check in the first
 *				iteration of status check loop. This is the
 *				minimum duration, and overhead of register
 *				reads and checks are on top of this.
 * @status_flags_1_set_all_wait: If non-zero, specifies that all bits of the
 *				status matching this field requested MUST be 1.
 * @status_flags_1_set_any_wait: If non-zero, specifies that at least one of the
 *				bits matching this field requested MUST be 1.
 * @status_flags_1_clr_all_wait: If non-zero, specifies that all bits of the
 *				status matching this field requested MUST be 0.
 * @status_flags_1_clr_any_wait: If non-zero, specifies that at least one of the
 *				bits matching this field requested MUST be 0.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations,
				 uint8_t num_match_iterations,
				 uint8_t delay_per_iteration_us,
				 uint8_t delay_before_iterations_us,
				 uint32_t status_flags_1_set_all_wait,
				 uint32_t status_flags_1_set_any_wait,
				 uint32_t status_flags_1_clr_all_wait,
				 uint32_t status_flags_1_clr_any_wait)
{
	struct ti_sci_msg_req_wait_proc_boot_status req;
	struct ti_sci_msg_hdr resp;

	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.num_wait_iterations = num_wait_iterations;
	req.num_match_iterations = num_match_iterations;
	req.delay_per_iteration_us = delay_per_iteration_us;
	req.delay_before_iterations_us = delay_before_iterations_us;
	req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
	req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
	req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
	req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;

	ret = ti_sci_do_xfer(&xfer);
	if (ret) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_proc_wait_boot_status_no_wait() - Wait for a processor boot status
 *					    without requesting or waiting for
 *					    a response.
 *
 * @proc_id:			Processor ID this request is for
 * @num_wait_iterations:	Total number of iterations we will check before
 *				we will timeout and give up
 * @num_match_iterations:	How many iterations should we have continued
 *				status to account for status bits glitching.
 *				This is to make sure that match occurs for
 *				consecutive checks. This implies that the
 *				worst case should consider that the stable
 *				time should at the worst be num_wait_iterations
 *				* num_match_iterations to prevent timeout.
 * @delay_per_iteration_us:	Specifies how long to wait (in micro seconds)
 *				between each status check. This is the minimum
 *				duration, and overhead of register reads and
 *				checks are on top of this and can vary based on
 *				varied conditions.
 * @delay_before_iterations_us:	Specifies how long to wait (in micro seconds)
 *				before the very first check in the first
 *				iteration of status check loop. This is the
 *				minimum duration, and overhead of register
 *				reads and checks are on top of this.
 * @status_flags_1_set_all_wait: If non-zero, specifies that all bits of the
 *				status matching this field requested MUST be 1.
 * @status_flags_1_set_any_wait: If non-zero, specifies that at least one of the
 *				bits matching this field requested MUST be 1.
 * @status_flags_1_clr_all_wait: If non-zero, specifies that all bits of the
 *				status matching this field requested MUST be 0.
 * @status_flags_1_clr_any_wait: If non-zero, specifies that at least one of the
 *				bits matching this field requested MUST be 0.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_proc_wait_boot_status_no_wait(uint8_t proc_id,
					 uint8_t num_wait_iterations,
					 uint8_t num_match_iterations,
					 uint8_t delay_per_iteration_us,
					 uint8_t delay_before_iterations_us,
					 uint32_t status_flags_1_set_all_wait,
					 uint32_t status_flags_1_set_any_wait,
					 uint32_t status_flags_1_clr_all_wait,
					 uint32_t status_flags_1_clr_any_wait)
{
	struct ti_sci_msg_req_wait_proc_boot_status req;
	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS, 0,
				    &req, sizeof(req),
				    NULL, 0,
				    &xfer);
	if (ret != 0U) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.num_wait_iterations = num_wait_iterations;
	req.num_match_iterations = num_match_iterations;
	req.delay_per_iteration_us = delay_per_iteration_us;
	req.delay_before_iterations_us = delay_before_iterations_us;
	req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
	req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
	req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
	req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;

	ret = ti_sci_do_xfer(&xfer);
	if (ret != 0U) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

/**
 * ti_sci_enter_sleep() - Command to initiate system transition into suspend.
 *
 * @proc_id:		Processor ID.
 * @mode:		Low power mode to enter.
 * @core_resume_addr:	Address that core should be
 *			resumed from after low power transition.
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_enter_sleep(uint8_t proc_id,
		       uint8_t mode,
		       uint64_t core_resume_addr)
{
	struct ti_sci_msg_req_enter_sleep req;
	struct ti_sci_xfer xfer;
	int ret;

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_ENTER_SLEEP, 0,
				    &req, sizeof(req),
				    NULL, 0,
				    &xfer);
	if (ret != 0U) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	req.processor_id = proc_id;
	req.mode = mode;
	req.core_resume_lo = core_resume_addr & TISCI_ADDR_LOW_MASK;
	req.core_resume_hi = (core_resume_addr & TISCI_ADDR_HIGH_MASK) >>
			     TISCI_ADDR_HIGH_SHIFT;

	ret = ti_sci_do_xfer(&xfer);
	if (ret != 0U) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	return 0;
}

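/*
 * Example (illustrative only): a platform suspend path might program the
 * resume address of the boot core and then request the transition. The
 * processor ID, mode, and resume address below are hypothetical placeholders:
 *
 *	ret = ti_sci_enter_sleep(EXAMPLE_BOOT_CORE_PROC_ID, EXAMPLE_SLEEP_MODE,
 *				 example_resume_pa);
 */
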
/**
 * ti_sci_lpm_get_next_sys_mode() - Get next LPM system mode
 *
 * @next_mode:	pointer to a variable that will store the next mode
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_lpm_get_next_sys_mode(uint8_t *next_mode)
{
	struct ti_sci_msg_req_lpm_get_next_sys_mode req;
	struct ti_sci_msg_resp_lpm_get_next_sys_mode resp;
	struct ti_sci_xfer xfer;
	int ret;

	if (next_mode == NULL) {
		return -EINVAL;
	}

	ret = ti_sci_setup_one_xfer(TI_SCI_MSG_LPM_GET_NEXT_SYS_MODE, 0,
				    &req, sizeof(req),
				    &resp, sizeof(resp),
				    &xfer);
	if (ret != 0) {
		ERROR("Message alloc failed (%d)\n", ret);
		return ret;
	}

	ret = ti_sci_do_xfer(&xfer);
	if (ret != 0) {
		ERROR("Transfer send failed (%d)\n", ret);
		return ret;
	}

	*next_mode = resp.mode;

	return 0;
}