/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>

#include "cmsis_compiler.h"
#include "tfm_ns_mailbox.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

static int32_t mailbox_wait_reply(mailbox_msg_handle_t handle);

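/*
 * Each queue slot is tracked by one bit in the shared status bitmasks:
 * empty_slots (the slot is free), pend_slots (a request is pending for SPE)
 * and replied_slots (SPE has written the reply). The helpers below set,
 * clear and test those per-slot bits; callers wrap them in the HAL critical
 * section where the masks are shared with the peer core.
 */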
static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1 << idx);
    }
}

static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1 << idx);
    }
}

static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1 << idx);
    }
}

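/*
 * A mailbox message handle is simply the slot index plus one, so that the
 * value 0 stays reserved for MAILBOX_MSG_NULL_HANDLE (for example, slot 0
 * maps to handle 1). The two helpers below convert between the two forms.
 */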
static int32_t get_mailbox_msg_handle(uint8_t idx,
                                      mailbox_msg_handle_t *handle)
{
    if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
        return MAILBOX_INVAL_PARAMS;
    }

    *handle = (mailbox_msg_handle_t)(idx + 1);

    return MAILBOX_SUCCESS;
}

static inline int32_t get_mailbox_msg_idx(mailbox_msg_handle_t handle,
                                          uint8_t *idx)
{
    if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
        return MAILBOX_INVAL_PARAMS;
    }

    *idx = (uint8_t)(handle - 1);

    return MAILBOX_SUCCESS;
}

static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1 << idx);
    }
}

static inline bool is_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->replied_slots & (1UL << idx);
    }

    return false;
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

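/*
 * Grab the first free slot: scan empty_slots from bit 0 upwards inside the
 * HAL critical section and mark the chosen slot as no longer empty.
 * Returns NUM_MAILBOX_QUEUE_SLOT when the queue is full.
 */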
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_hal_enter_critical();
    status = queue->empty_slots;

    if (!status) {
        /* No empty slot */
        tfm_ns_mailbox_hal_exit_critical();
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1 << idx)) {
            break;
        }
    }

    clear_queue_slot_empty(idx);

    tfm_ns_mailbox_hal_exit_critical();

    return idx;
}

static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

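/*
 * Test-only instrumentation (TFM_MULTI_CORE_TEST): records how many slots
 * are in use each time a request is transmitted, so that the average slot
 * occupancy per transmission can be reported afterwards.
 */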
#ifdef TFM_MULTI_CORE_TEST
/*
 * When the NSPE mailbox only serves a single non-secure core, the spinlock
 * only needs to disable IRQs.
 */
static inline void ns_mailbox_spin_lock(void)
{
    __disable_irq();
}

static inline void ns_mailbox_spin_unlock(void)
{
    __enable_irq();
}

void tfm_ns_mailbox_tx_stats_init(void)
{
    if (!mailbox_queue_ptr) {
        return;
    }

    mailbox_queue_ptr->nr_tx = 0;
    mailbox_queue_ptr->nr_used_slots = 0;
}

static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
{
    mailbox_queue_status_t empty_status;
    uint8_t idx, nr_empty = 0;

    if (!ns_queue) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();
    /* Count the number of used slots when this tx arrives */
    empty_status = ns_queue->empty_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (empty_status) {
        for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
            if (empty_status & (0x1UL << idx)) {
                nr_empty++;
            }
        }
    }

    ns_mailbox_spin_lock();
    ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
    ns_queue->nr_tx++;
    ns_mailbox_spin_unlock();
}

void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
{
    uint32_t nr_used_slots, nr_tx;

    if (!mailbox_queue_ptr || !stats_res) {
        return;
    }

    nr_used_slots = mailbox_queue_ptr->nr_used_slots;
    nr_tx = mailbox_queue_ptr->nr_tx;
    if (!nr_tx) {
        /* No transmission recorded yet - avoid division by zero */
        return;
    }

    stats_res->avg_nr_slots = nr_used_slots / nr_tx;
    nr_used_slots %= nr_tx;
    stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
#endif

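/*
 * Transmit path: acquire a free slot, copy the PSA client call parameters
 * into the slot, record the calling task as the owner, mark the slot as
 * pending and finally notify the SPE core via
 * tfm_ns_mailbox_hal_notify_peer().
 */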
static int32_t mailbox_tx_client_req(uint32_t call_type,
                                     const struct psa_client_params_t *params,
                                     int32_t client_id,
                                     mailbox_msg_handle_t *handle)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    const void *task_handle;

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    mailbox_tx_stats_update(mailbox_queue_ptr);
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_os_get_task_handle();
    set_msg_owner(idx, task_handle);

    get_mailbox_msg_handle(idx, handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    return MAILBOX_SUCCESS;
}

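/*
 * Receive path: fetch the return value written by SPE into the slot, drop
 * the owner reference and release the slot. The empty flag is set last so
 * that the slot only becomes reusable once its other status flags have been
 * cleaned up.
 */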
static int32_t mailbox_rx_client_reply(mailbox_msg_handle_t handle,
                                       int32_t *reply)
{
    uint8_t idx;
    int32_t ret;

    if ((handle == MAILBOX_MSG_NULL_HANDLE) || (!reply)) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_hal_enter_critical();
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}

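/*
 * tfm_ns_mailbox_client_call() is the synchronous entry point used by the
 * NS client library: it serializes callers with the OS lock, transmits the
 * request, sleeps until the reply arrives and then returns the result.
 *
 * Rough usage sketch from an RTOS thread (illustrative only - the concrete
 * call type constants and psa_client_params_t members are defined by the
 * mailbox/PSA client headers):
 *
 *     struct psa_client_params_t params;
 *     int32_t reply;
 *
 *     ... fill params for the requested PSA client API ...
 *
 *     if (tfm_ns_mailbox_client_call(call_type, &params, client_id,
 *                                    &reply) != MAILBOX_SUCCESS) {
 *         ... handle the mailbox error ...
 *     }
 */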
int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
                                   const struct psa_client_params_t *params,
                                   int32_t client_id,
                                   int32_t *reply)
{
    mailbox_msg_handle_t handle = MAILBOX_MSG_NULL_HANDLE;
    int32_t reply_buf = 0x0;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    if (!params || !reply) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (tfm_ns_mailbox_os_lock_acquire() != MAILBOX_SUCCESS) {
        return MAILBOX_QUEUE_FULL;
    }

    /* It requires an SVCall if the NS mailbox runs in privileged mode. */
    ret = mailbox_tx_client_req(call_type, params, client_id, &handle);
    if (ret != MAILBOX_SUCCESS) {
        goto exit;
    }

    mailbox_wait_reply(handle);

    /* It requires an SVCall if the NS mailbox runs in privileged mode. */
    ret = mailbox_rx_client_reply(handle, &reply_buf);
    if (ret == MAILBOX_SUCCESS) {
        *reply = reply_buf;
    }

exit:
    if (tfm_ns_mailbox_os_lock_release() != MAILBOX_SUCCESS) {
        return MAILBOX_GENERIC_ERROR;
    }

    return ret;
}

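/*
 * With TFM_MULTI_CORE_MULTI_CLIENT_CALL enabled, the routine below is
 * intended to be called from the NS mailbox IRQ handler: it picks the first
 * slot flagged in replied_slots, converts its replied state into a woken
 * state and asks the RTOS to wake the task stored in the slot's owner field.
 */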
#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
{
    uint8_t idx;
    int32_t ret;
    mailbox_msg_handle_t handle;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /* Find the first replied message in queue */
        if (replied_status & (0x1UL << idx)) {
            tfm_ns_mailbox_hal_enter_critical_isr();
            clear_queue_slot_replied(idx);
            set_queue_slot_woken(idx);
            tfm_ns_mailbox_hal_exit_critical_isr();

            break;
        }
    }

    /* This should not happen in practice; guard against it just in case. */
    if (idx == NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_NO_PEND_EVENT;
    }

    ret = get_mailbox_msg_handle(idx, &handle);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    tfm_ns_mailbox_os_wake_task_isr(mailbox_queue_ptr->queue[idx].owner,
                                    handle);

    return MAILBOX_SUCCESS;
}
#endif

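/*
 * One-time initialization of the NSPE side: the caller provides the shared
 * queue memory, which is zeroed, all slots are marked empty, and then the
 * platform HAL and the OS lock are initialized. Expected to run once during
 * NS start-up, before any client call is issued.
 */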
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of mailbox queue address may be required according
     * to non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /*
     * Initialize the empty bitmask: set one bit per slot. The mask is built
     * in two steps to avoid an undefined shift by the full width of
     * mailbox_queue_status_t when NUM_MAILBOX_QUEUE_SLOT uses all its bits.
     */
    queue->empty_slots =
        (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
        (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    ret = tfm_ns_mailbox_os_lock_init();

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init();
#endif

    return ret;
}

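/*
 * Block the calling task until its reply arrives.
 * tfm_ns_mailbox_os_wait_reply() may return on unrelated events, so the
 * slot's woken/replied flag is re-checked under the critical section and the
 * task goes back to sleep on a spurious wakeup.
 */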
static int32_t mailbox_wait_reply(mailbox_msg_handle_t handle)
{
    uint8_t idx;
    int32_t ret;

    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    while (1) {
        tfm_ns_mailbox_os_wait_reply(handle);

        /*
         * Woken up from sleep.
         * Check the completed flag to make sure that the current thread is
         * woken up by the reply event, rather than by other events.
         */
        tfm_ns_mailbox_hal_enter_critical();
        /*
         * Accessing the NS mailbox flags requires an SVCall if the NS mailbox
         * runs in privileged mode.
         * An alternative is to let each NS thread allocate its own is_woken
         * flag, but a spinlock-like mechanism would still be required.
         */
#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
        if (is_queue_slot_woken(idx)) {
            clear_queue_slot_woken(idx);
            break;
        }
#else
        if (is_queue_slot_replied(idx)) {
            clear_queue_slot_replied(idx);
            break;
        }
#endif
        tfm_ns_mailbox_hal_exit_critical();
    }

    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}