/*
 * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>
#include "tfm_ns_mailbox.h"
#include "tfm_plat_ns.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

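/*
 * The helpers below set/clear the per-slot status flags in the shared queue.
 * Callers serialize access to bitmasks shared with the peer core via
 * tfm_ns_mailbox_hal_enter_critical()/tfm_ns_mailbox_hal_exit_critical().
 */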
static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1UL << idx);
    }
}

static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1UL << idx);
    }
}

static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1UL << idx);
    }
}

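/*
 * A message handle is the slot index plus one, so that the value 0 can be
 * reserved for MAILBOX_MSG_NULL_HANDLE.
 */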
static inline int32_t get_mailbox_msg_handle(uint8_t idx,
                                             mailbox_msg_handle_t *handle)
{
    if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
        return MAILBOX_INVAL_PARAMS;
    }

    *handle = (mailbox_msg_handle_t)(idx + 1);

    return MAILBOX_SUCCESS;
}

static inline int32_t get_mailbox_msg_idx(mailbox_msg_handle_t handle,
                                          uint8_t *idx)
{
    if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
        return MAILBOX_INVAL_PARAMS;
    }

    *idx = (uint8_t)(handle - 1);

    return MAILBOX_SUCCESS;
}

static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1UL << idx);
    }
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

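/*
 * Scan the empty-slot bitmask inside the critical section and claim the
 * first free slot. Returns NUM_MAILBOX_QUEUE_SLOT if the queue is full.
 */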
static uint8_t acquire_empty_slot(const struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_hal_enter_critical();
    status = queue->empty_slots;

    if (!status) {
        /* No empty slot */
        tfm_ns_mailbox_hal_exit_critical();
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1UL << idx)) {
            break;
        }
    }

    clear_queue_slot_empty(idx);

    tfm_ns_mailbox_hal_exit_critical();

    return idx;
}

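/*
 * Record which non-secure client (e.g. an RTOS task handle) owns the message
 * in the given slot, so that the owner can be identified when the reply
 * arrives.
 */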
static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

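/*
 * The transmission statistics below are only built for the multi-core test,
 * to measure the average mailbox queue occupancy per client call.
 */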
#ifdef TFM_MULTI_CORE_TEST
void tfm_ns_mailbox_tx_stats_init(void)
{
    if (!mailbox_queue_ptr) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();

    mailbox_queue_ptr->nr_tx = 0;
    mailbox_queue_ptr->nr_used_slots = 0;

    tfm_ns_mailbox_hal_exit_critical();
}

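/*
 * On each transmission, bump the total number of transmissions and add the
 * number of slots in use at that moment, so that the average occupancy can
 * be derived later.
 */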
static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
{
    mailbox_queue_status_t empty_status;
    uint8_t idx, nr_empty = 0;

    if (!ns_queue) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();

    ns_queue->nr_tx++;

    /* Count the number of used slots when this tx arrives */
    empty_status = ns_queue->empty_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (empty_status) {
        for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
            if (empty_status & (0x1UL << idx)) {
                nr_empty++;
            }
        }
    }

    tfm_ns_mailbox_hal_enter_critical();
    ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
    tfm_ns_mailbox_hal_exit_critical();
}

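/*
 * Report the average number of used slots per transmission as an integer
 * part and a tenths part, avoiding floating-point arithmetic.
 */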
void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
{
    uint32_t nr_used_slots, nr_tx;

    if (!mailbox_queue_ptr || !stats_res) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();
    nr_used_slots = mailbox_queue_ptr->nr_used_slots;
    nr_tx = mailbox_queue_ptr->nr_tx;
    tfm_ns_mailbox_hal_exit_critical();

    if (!nr_tx) {
        /* No transmission recorded yet: avoid dividing by zero */
        stats_res->avg_nr_slots = 0;
        stats_res->avg_nr_slots_tenths = 0;
        return;
    }

    stats_res->avg_nr_slots = nr_used_slots / nr_tx;
    nr_used_slots %= nr_tx;
    stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
#endif

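/*
 * Send a PSA client call request to SPE: claim an empty slot, fill in the
 * message, record the owner task, mark the slot as pending and notify the
 * peer core. Returns the message handle on success.
 */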
mailbox_msg_handle_t tfm_ns_mailbox_tx_client_req(uint32_t call_type,
                                                  const struct psa_client_params_t *params,
                                                  int32_t client_id)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    mailbox_msg_handle_t handle;
    const void *task_handle;

    if (!mailbox_queue_ptr) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    if (!params) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    mailbox_tx_stats_update(mailbox_queue_ptr);
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_get_task_handle();
    set_msg_owner(idx, task_handle);

    get_mailbox_msg_handle(idx, &handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    return handle;
}

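/*
 * Read back the return value of a replied message and release its slot so
 * that it can be reused for a new request.
 */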
int32_t tfm_ns_mailbox_rx_client_reply(mailbox_msg_handle_t handle,
                                       int32_t *reply)
{
    uint8_t idx;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INVAL_PARAMS;
    }

    if ((handle == MAILBOX_MSG_NULL_HANDLE) || (!reply)) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear up the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_hal_enter_critical();
    clear_queue_slot_replied(idx);
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}

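/*
 * Check whether the reply to the given message has arrived, without blocking
 * or releasing the slot.
 */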
bool tfm_ns_mailbox_is_msg_replied(mailbox_msg_handle_t handle)
{
    uint8_t idx;
    int32_t ret;
    mailbox_queue_status_t status;

    if (!mailbox_queue_ptr) {
        return false;
    }

    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        return false;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return false;
    }

    tfm_ns_mailbox_hal_enter_critical();
    status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (status & (1UL << idx)) {
        return true;
    }

    return false;
}

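/*
 * Called from the mailbox interrupt handler: pick the first replied message,
 * move its slot from "replied" to "woken" state and hand its handle back so
 * that the owner task can be woken up.
 */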
mailbox_msg_handle_t tfm_ns_mailbox_fetch_reply_msg_isr(void)
{
    uint8_t idx;
    mailbox_msg_handle_t handle;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /* Find the first replied message in queue */
        if (replied_status & (0x1UL << idx)) {
            tfm_ns_mailbox_hal_enter_critical_isr();
            clear_queue_slot_replied(idx);
            set_queue_slot_woken(idx);
            tfm_ns_mailbox_hal_exit_critical_isr();

            if (get_mailbox_msg_handle(idx, &handle) == MAILBOX_SUCCESS) {
                return handle;
            }
        }
    }

    return MAILBOX_MSG_NULL_HANDLE;
}

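/*
 * Look up the owner recorded for the given message handle, typically used by
 * the interrupt handler to find the task to wake up.
 */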
const void *tfm_ns_mailbox_get_msg_owner(mailbox_msg_handle_t handle)
{
    uint8_t idx;

    if (!mailbox_queue_ptr) {
        return NULL;
    }

    if (get_mailbox_msg_idx(handle, &idx) != MAILBOX_SUCCESS) {
        return NULL;
    }

    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].owner;
    }

    return NULL;
}

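/*
 * Initialize the NSPE side of the mailbox: zero the shared queue, mark all
 * slots as empty and run the platform specific initialization.
 */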
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of mailbox queue address may be required according
     * to non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /*
     * Initialize the empty bitmask. The mask is built in two steps to avoid
     * shifting by NUM_MAILBOX_QUEUE_SLOT bits, which would be undefined when
     * the slot count equals the bit width of mailbox_queue_status_t.
     */
    queue->empty_slots =
        (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
        (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init();
#endif

    return ret;
}

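/*
 * When multiple outstanding NS client calls are enabled, a client thread
 * sleeps in the HAL until the mailbox interrupt marks its slot as woken.
 */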
#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
int32_t tfm_ns_mailbox_wait_reply(mailbox_msg_handle_t handle)
{
    uint8_t idx;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    while (1) {
        tfm_ns_mailbox_hal_wait_reply(handle);

        /*
         * Woken up from sleep. Check the woken flag to make sure that the
         * current thread was woken up by the reply event, rather than by
         * another event.
         */
        tfm_ns_mailbox_hal_enter_critical();
        if (is_queue_slot_woken(idx)) {
            tfm_ns_mailbox_hal_exit_critical();
            break;
        }
        tfm_ns_mailbox_hal_exit_critical();
    }

    return MAILBOX_SUCCESS;
}
#endif