blob: 9ab3004769f3fd6c6d69f6337191a69e47d74395 [file] [log] [blame]
/*
 * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>
#include "tfm_ns_mailbox.h"
#include "tfm_plat_ns.h"
/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;
14
15static inline void clear_queue_slot_empty(uint8_t idx)
16{
17 if (idx < NUM_MAILBOX_QUEUE_SLOT) {
18 mailbox_queue_ptr->empty_slots &= ~(1 << idx);
19 }
20}
21
22static inline void set_queue_slot_empty(uint8_t idx)
23{
24 if (idx < NUM_MAILBOX_QUEUE_SLOT) {
25 mailbox_queue_ptr->empty_slots |= (1 << idx);
26 }
27}
28
29static inline void set_queue_slot_pend(uint8_t idx)
30{
31 if (idx < NUM_MAILBOX_QUEUE_SLOT) {
32 mailbox_queue_ptr->pend_slots |= (1 << idx);
33 }
34}
35
36static inline int32_t get_mailbox_msg_handle(uint8_t idx,
37 mailbox_msg_handle_t *handle)
38{
39 if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
40 return MAILBOX_INVAL_PARAMS;
41 }
42
43 *handle = (mailbox_msg_handle_t)(idx + 1);
44
45 return MAILBOX_SUCCESS;
46}
47
48static inline int32_t get_mailbox_msg_idx(mailbox_msg_handle_t handle,
49 uint8_t *idx)
50{
51 if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
52 return MAILBOX_INVAL_PARAMS;
53 }
54
55 *idx = (uint8_t)(handle - 1);
56
57 return MAILBOX_SUCCESS;
58}
59
60static inline void clear_queue_slot_replied(uint8_t idx)
61{
62 if (idx < NUM_MAILBOX_QUEUE_SLOT) {
63 mailbox_queue_ptr->replied_slots &= ~(1 << idx);
64 }
65}
66
67static uint8_t acquire_empty_slot(const struct ns_mailbox_queue_t *queue)
68{
69 uint8_t idx;
70 mailbox_queue_status_t status;
71
72 tfm_ns_mailbox_hal_enter_critical();
73 status = queue->empty_slots;
74
75 if (!status) {
76 /* No empty slot */
77 tfm_ns_mailbox_hal_exit_critical();
78 return NUM_MAILBOX_QUEUE_SLOT;
79 }
80
81 for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
82 if (status & (1 << idx)) {
83 break;
84 }
85 }
86
87 clear_queue_slot_empty(idx);
88
89 tfm_ns_mailbox_hal_exit_critical();
90
91 return idx;
92}
93
David Hu06ebac72019-09-29 16:01:54 +080094static void set_msg_owner(uint8_t idx, const void *owner)
95{
96 if (idx < NUM_MAILBOX_QUEUE_SLOT) {
97 mailbox_queue_ptr->queue[idx].owner = owner;
98 }
99}
100
David Hud2753b32019-09-23 18:46:15 +0800101mailbox_msg_handle_t tfm_ns_mailbox_tx_client_req(uint32_t call_type,
102 const struct psa_client_params_t *params,
103 int32_t client_id)
104{
105 uint8_t idx;
106 struct mailbox_msg_t *msg_ptr;
107 mailbox_msg_handle_t handle;
David Hu06ebac72019-09-29 16:01:54 +0800108 const void *task_handle;
David Hud2753b32019-09-23 18:46:15 +0800109
110 if (!mailbox_queue_ptr) {
111 return MAILBOX_MSG_NULL_HANDLE;
112 }
113
114 if (!params) {
115 return MAILBOX_MSG_NULL_HANDLE;
116 }
117
118 idx = acquire_empty_slot(mailbox_queue_ptr);
119 if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
120 return MAILBOX_QUEUE_FULL;
121 }
122
123 /* Fill the mailbox message */
124 msg_ptr = &mailbox_queue_ptr->queue[idx].msg;
125
126 msg_ptr->call_type = call_type;
127 memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
128 msg_ptr->client_id = client_id;
129
David Hu06ebac72019-09-29 16:01:54 +0800130 /*
131 * Fetch the current task handle. The task will be woken up according the
132 * handle value set in the owner field.
133 */
134 task_handle = tfm_ns_mailbox_get_task_handle();
135 set_msg_owner(idx, task_handle);
136
David Hud2753b32019-09-23 18:46:15 +0800137 get_mailbox_msg_handle(idx, &handle);
138
139 tfm_ns_mailbox_hal_enter_critical();
140 set_queue_slot_pend(idx);
141 tfm_ns_mailbox_hal_exit_critical();
142
143 tfm_ns_mailbox_hal_notify_peer();
144
145 return handle;
146}
147
148int32_t tfm_ns_mailbox_rx_client_reply(mailbox_msg_handle_t handle,
149 int32_t *reply)
150{
151 uint8_t idx;
152 int32_t ret;
153
154 if (!mailbox_queue_ptr) {
155 return MAILBOX_INVAL_PARAMS;
156 }
157
158 if ((handle == MAILBOX_MSG_NULL_HANDLE) || (!reply)) {
159 return MAILBOX_INVAL_PARAMS;
160 }
161
162 ret = get_mailbox_msg_idx(handle, &idx);
163 if (ret != MAILBOX_SUCCESS) {
164 return ret;
165 }
166
167 *reply = mailbox_queue_ptr->queue[idx].reply.return_val;
168
David Hu06ebac72019-09-29 16:01:54 +0800169 /* Clear up the owner field */
170 set_msg_owner(idx, NULL);
171
David Hud2753b32019-09-23 18:46:15 +0800172 tfm_ns_mailbox_hal_enter_critical();
173 set_queue_slot_empty(idx);
174 clear_queue_slot_replied(idx);
175 tfm_ns_mailbox_hal_exit_critical();
176
177 return MAILBOX_SUCCESS;
178}
179
180bool tfm_ns_mailbox_is_msg_replied(mailbox_msg_handle_t handle)
181{
182 uint8_t idx;
183 int32_t ret;
184 mailbox_queue_status_t status;
185
186 if (!mailbox_queue_ptr) {
187 return false;
188 }
189
190 if (handle == MAILBOX_MSG_NULL_HANDLE) {
191 return false;
192 }
193
194 ret = get_mailbox_msg_idx(handle, &idx);
195 if (ret != MAILBOX_SUCCESS) {
196 return false;
197 }
198
199 tfm_ns_mailbox_hal_enter_critical();
200 status = mailbox_queue_ptr->replied_slots;
201 tfm_ns_mailbox_hal_exit_critical();
202
203 if (status & (1 << idx)) {
204 return true;
205 }
206
207 return false;
208}
209
210int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
211{
212 int32_t ret;
213
214 if (!queue) {
215 return MAILBOX_INVAL_PARAMS;
216 }
217
218 /*
219 * Further verification of mailbox queue address may be required according
220 * to non-secure memory assignment.
221 */
222
223 memset(queue, 0, sizeof(*queue));
224
225 /* Initialize empty bitmask */
226 queue->empty_slots = (mailbox_queue_status_t)((1 << NUM_MAILBOX_QUEUE_SLOT)
227 - 1);
228
229 mailbox_queue_ptr = queue;
230
231 /* Platform specific initialization. */
232 ret = tfm_ns_mailbox_hal_init(queue);
233
234 return ret;
235}