/*
 * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>
#include "tfm_ns_mailbox.h"
#include "tfm_plat_ns.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

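/* Mark the slot at idx as occupied: clear its bit in the empty-slot bitmask. */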
static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1 << idx);
    }
}

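/* Mark the slot at idx as free: set its bit in the empty-slot bitmask. */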
static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1 << idx);
    }
}

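/* Mark the slot at idx as holding a message pending to be processed by SPE. */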
static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1 << idx);
    }
}

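/*
 * Map a queue slot index to a message handle. Handles are the slot index
 * offset by one, so that a valid handle is never mistaken for
 * MAILBOX_MSG_NULL_HANDLE.
 */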
static inline int32_t get_mailbox_msg_handle(uint8_t idx,
                                             mailbox_msg_handle_t *handle)
{
    if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
        return MAILBOX_INVAL_PARAMS;
    }

    *handle = (mailbox_msg_handle_t)(idx + 1);

    return MAILBOX_SUCCESS;
}

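/* Map a message handle back to its queue slot index. */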
static inline int32_t get_mailbox_msg_idx(mailbox_msg_handle_t handle,
                                          uint8_t *idx)
{
    if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
        return MAILBOX_INVAL_PARAMS;
    }

    *idx = (uint8_t)(handle - 1);

    return MAILBOX_SUCCESS;
}

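/* Clear the 'replied' status bit of the slot at idx. */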
static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1 << idx);
    }
}

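/* Flag that the owner task of the slot at idx has been woken by a reply. */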
static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

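/*
 * Counterpart of set_queue_slot_woken(). A definition is absent from this
 * listing although tfm_ns_mailbox_rx_client_reply() below calls it;
 * reconstructed here, assuming it simply clears the is_woken flag.
 */
static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

/*
 * Claim the first empty queue slot inside a critical section. Returns the
 * slot index, or NUM_MAILBOX_QUEUE_SLOT if the queue is full.
 */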
static uint8_t acquire_empty_slot(const struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_hal_enter_critical();
    status = queue->empty_slots;

    if (!status) {
        /* No empty slot */
        tfm_ns_mailbox_hal_exit_critical();
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1 << idx)) {
            break;
        }
    }

    clear_queue_slot_empty(idx);

    tfm_ns_mailbox_hal_exit_critical();

    return idx;
}

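/* Record the owner task handle of the message held in the slot at idx. */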
static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

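/*
 * Queue a PSA client call request in an empty mailbox slot, record the
 * calling task as its owner, mark the slot as pending and notify SPE.
 */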
mailbox_msg_handle_t tfm_ns_mailbox_tx_client_req(uint32_t call_type,
                                       const struct psa_client_params_t *params,
                                       int32_t client_id)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    mailbox_msg_handle_t handle;
    const void *task_handle;

    if (!mailbox_queue_ptr) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    if (!params) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_get_task_handle();
    set_msg_owner(idx, task_handle);

    get_mailbox_msg_handle(idx, &handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    return handle;
}

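/*
 * Fetch the return value of a replied mailbox message, then reset the slot
 * status and return the slot to the empty pool.
 */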
int32_t tfm_ns_mailbox_rx_client_reply(mailbox_msg_handle_t handle,
                                       int32_t *reply)
{
    uint8_t idx;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INVAL_PARAMS;
    }

    if ((handle == MAILBOX_MSG_NULL_HANDLE) || (!reply)) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_hal_enter_critical();
    clear_queue_slot_replied(idx);
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}

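/* Check whether the message identified by handle has been replied by SPE. */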
bool tfm_ns_mailbox_is_msg_replied(mailbox_msg_handle_t handle)
{
    uint8_t idx;
    int32_t ret;
    mailbox_queue_status_t status;

    if (!mailbox_queue_ptr) {
        return false;
    }

    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        return false;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return false;
    }

    tfm_ns_mailbox_hal_enter_critical();
    status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (status & (1 << idx)) {
        return true;
    }

    return false;
}

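/*
 * Called from the mailbox interrupt handler: find the first replied message,
 * mark its slot as woken and return the message handle.
 */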
mailbox_msg_handle_t tfm_ns_mailbox_fetch_reply_msg_isr(void)
{
    uint8_t idx;
    mailbox_msg_handle_t handle;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /* Find the first replied message in queue */
        if (replied_status & (0x1UL << idx)) {
            tfm_ns_mailbox_hal_enter_critical_isr();
            clear_queue_slot_replied(idx);
            set_queue_slot_woken(idx);
            tfm_ns_mailbox_hal_exit_critical_isr();

            if (get_mailbox_msg_handle(idx, &handle) == MAILBOX_SUCCESS) {
                return handle;
            }
        }
    }

    return MAILBOX_MSG_NULL_HANDLE;
}

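/* Return the owner task handle of the message, or NULL if there is none. */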
const void *tfm_ns_mailbox_get_msg_owner(mailbox_msg_handle_t handle)
{
    uint8_t idx;

    if (get_mailbox_msg_idx(handle, &idx) != MAILBOX_SUCCESS) {
        return NULL;
    }

    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].owner;
    }

    return NULL;
}

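/*
 * Initialize the NSPE mailbox: zero the shared queue, mark every slot empty
 * and run the platform-specific HAL initialization.
 */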
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of mailbox queue address may be required according
     * to non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /* Initialize empty bitmask */
    queue->empty_slots = (mailbox_queue_status_t)((1 << NUM_MAILBOX_QUEUE_SLOT)
                                                  - 1);

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);

    return ret;
}