/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>

#include "cmsis_compiler.h"
#include "os_wrapper/common.h"
#include "tfm_multi_core_api.h"
#include "tfm_ns_mailbox.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

static int32_t mailbox_wait_reply(mailbox_msg_handle_t handle);

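/*
 * The slot status helpers below manipulate per-slot bitmaps in the NSPE
 * mailbox queue: bit "idx" corresponds to queue slot "idx". Callers are
 * expected to serialize updates that can race with the peer core or with IRQ
 * context via tfm_ns_mailbox_hal_enter_critical()/..._exit_critical().
 */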
static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1UL << idx);
    }
}

static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1UL << idx);
    }
}

static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1UL << idx);
    }
}

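/*
 * A message handle is simply the slot index plus one, so that the value 0
 * stays reserved for MAILBOX_MSG_NULL_HANDLE. get_mailbox_msg_idx() performs
 * the reverse mapping.
 */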
static int32_t get_mailbox_msg_handle(uint8_t idx,
                                      mailbox_msg_handle_t *handle)
{
    if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
        return MAILBOX_INVAL_PARAMS;
    }

    *handle = (mailbox_msg_handle_t)(idx + 1);

    return MAILBOX_SUCCESS;
}

static inline int32_t get_mailbox_msg_idx(mailbox_msg_handle_t handle,
                                          uint8_t *idx)
{
    if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
        return MAILBOX_INVAL_PARAMS;
    }

    *idx = (uint8_t)(handle - 1);

    return MAILBOX_SUCCESS;
}

static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1UL << idx);
    }
}

static inline bool is_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->replied_slots & (1UL << idx);
    }

    return false;
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

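/*
 * Scan the empty-slot bitmap inside the HAL critical section, claim the first
 * free slot by clearing its "empty" bit and return its index. Returns
 * NUM_MAILBOX_QUEUE_SLOT when the queue is full.
 */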
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_hal_enter_critical();
    status = queue->empty_slots;

    if (!status) {
        /* No empty slot */
        tfm_ns_mailbox_hal_exit_critical();
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1UL << idx)) {
            break;
        }
    }

    clear_queue_slot_empty(idx);

    tfm_ns_mailbox_hal_exit_critical();

    return idx;
}

static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

#ifdef TFM_MULTI_CORE_TEST
/*
 * When the NSPE mailbox only serves a single non-secure core, the spinlock
 * only needs to disable IRQs.
 */
static inline void ns_mailbox_spin_lock(void)
{
    __disable_irq();
}

static inline void ns_mailbox_spin_unlock(void)
{
    __enable_irq();
}

void tfm_ns_mailbox_tx_stats_init(void)
{
    if (!mailbox_queue_ptr) {
        return;
    }

    mailbox_queue_ptr->nr_tx = 0;
    mailbox_queue_ptr->nr_used_slots = 0;
}

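/*
 * Sample the number of occupied slots at the moment a transmission is issued
 * and accumulate it, together with the transmission count, for the average
 * slot usage statistics.
 */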
static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
{
    mailbox_queue_status_t empty_status;
    uint8_t idx, nr_empty = 0;

    if (!ns_queue) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();
    /* Count the number of used slots when this tx arrives */
    empty_status = ns_queue->empty_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (empty_status) {
        for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
            if (empty_status & (0x1UL << idx)) {
                nr_empty++;
            }
        }
    }

    ns_mailbox_spin_lock();
    ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
    ns_queue->nr_tx++;
    ns_mailbox_spin_unlock();
}

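/*
 * Report the average number of used slots per transmission as an integer part
 * plus tenths. For example, nr_used_slots = 25 over nr_tx = 10 transmissions
 * yields avg_nr_slots = 2 and avg_nr_slots_tenths = 5, i.e. 2.5 slots on
 * average.
 */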
void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
{
    uint32_t nr_used_slots, nr_tx;

    if (!mailbox_queue_ptr || !stats_res) {
        return;
    }

    nr_used_slots = mailbox_queue_ptr->nr_used_slots;
    nr_tx = mailbox_queue_ptr->nr_tx;

    /* Avoid a division by zero if no transmission has been recorded yet. */
    if (!nr_tx) {
        return;
    }

    stats_res->avg_nr_slots = nr_used_slots / nr_tx;
    nr_used_slots %= nr_tx;
    stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
#endif

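/*
 * Deliver one PSA client call to SPE: acquire an empty slot, fill in the
 * mailbox message, record the calling task as the slot owner, derive the
 * message handle, mark the slot as pending and notify the peer core.
 */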
static int32_t mailbox_tx_client_req(uint32_t call_type,
                                     const struct psa_client_params_t *params,
                                     int32_t client_id,
                                     mailbox_msg_handle_t *handle)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    const void *task_handle;

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    mailbox_tx_stats_update(mailbox_queue_ptr);
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_get_task_handle();
    set_msg_owner(idx, task_handle);

    get_mailbox_msg_handle(idx, handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    return MAILBOX_SUCCESS;
}

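/*
 * Retrieve the return value of a completed call from the slot identified by
 * the handle, then release the slot: clear the owner and woken flags and mark
 * the slot empty again.
 */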
static int32_t mailbox_rx_client_reply(mailbox_msg_handle_t handle,
                                       int32_t *reply)
{
    uint8_t idx;
    int32_t ret;

    if ((handle == MAILBOX_MSG_NULL_HANDLE) || (!reply)) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_hal_enter_critical();
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}

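/*
 * Public entry for NS clients: serialize concurrent callers with the
 * multi-core lock, transmit the request, sleep until the reply arrives and
 * copy back the return value.
 *
 * Illustrative usage sketch (the call type macro and the layout of
 * struct psa_client_params_t come from the mailbox interface headers and are
 * assumed here; the PSA client API wrappers in the NS interface normally
 * build the parameters and the client_id):
 *
 *     struct psa_client_params_t params;
 *     int32_t reply;
 *
 *     ... fill params according to the requested PSA API ...
 *
 *     if (tfm_ns_mailbox_client_call(MAILBOX_PSA_CALL, &params,
 *                                    client_id, &reply) == MAILBOX_SUCCESS) {
 *         ... use reply ...
 *     }
 */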
int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
                                   const struct psa_client_params_t *params,
                                   int32_t client_id,
                                   int32_t *reply)
{
    mailbox_msg_handle_t handle = MAILBOX_MSG_NULL_HANDLE;
    int32_t reply_buf = 0x0;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    if (!params || !reply) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (tfm_ns_multi_core_lock_acquire() != OS_WRAPPER_SUCCESS) {
        return MAILBOX_QUEUE_FULL;
    }

    /* This requires an SVCall if the NS mailbox is put in privileged mode. */
    ret = mailbox_tx_client_req(call_type, params, client_id, &handle);
    if (ret != MAILBOX_SUCCESS) {
        goto exit;
    }

    mailbox_wait_reply(handle);

    /* This requires an SVCall if the NS mailbox is put in privileged mode. */
    ret = mailbox_rx_client_reply(handle, &reply_buf);
    if (ret == MAILBOX_SUCCESS) {
        *reply = reply_buf;
    }

exit:
    if (tfm_ns_multi_core_lock_release() != OS_WRAPPER_SUCCESS) {
        return MAILBOX_GENERIC_ERROR;
    }

    return ret;
}

#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
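/*
 * ISR helper: find the first slot whose reply has arrived, convert its
 * "replied" status into "woken" and ask the HAL to wake up the task that
 * owns the message.
 */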
int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
{
    uint8_t idx;
    int32_t ret;
    mailbox_msg_handle_t handle;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /* Find the first replied message in queue */
        if (replied_status & (0x1UL << idx)) {
            tfm_ns_mailbox_hal_enter_critical_isr();
            clear_queue_slot_replied(idx);
            set_queue_slot_woken(idx);
            tfm_ns_mailbox_hal_exit_critical_isr();

            break;
        }
    }

    /* In theory, this should not occur. Check just in case. */
    if (idx == NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_NO_PEND_EVENT;
    }

    ret = get_mailbox_msg_handle(idx, &handle);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    tfm_ns_mailbox_hal_wake_task_isr(mailbox_queue_ptr->queue[idx].owner,
                                     handle);

    return MAILBOX_SUCCESS;
}
#endif

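/*
 * Initialize the NSPE mailbox queue: zero the shared structure, mark every
 * slot as empty and hand the queue over to the platform HAL for the
 * platform-specific part of the initialization.
 */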
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of the mailbox queue address may be required,
     * depending on the non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /* Initialize empty bitmask */
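    /*
     * The bitmask is built in two steps so that the shift never reaches the
     * full width of the status word when NUM_MAILBOX_QUEUE_SLOT equals the
     * number of bits in mailbox_queue_status_t.
     */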
    queue->empty_slots =
        (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
        (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init();
#endif

    return ret;
}

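/*
 * Block the calling thread until the reply for the given handle arrives.
 * The thread sleeps in tfm_ns_mailbox_hal_wait_reply() and re-checks the slot
 * status after each wake-up, since it may have been woken by other events.
 */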
static int32_t mailbox_wait_reply(mailbox_msg_handle_t handle)
{
    uint8_t idx;
    int32_t ret;

    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    while (1) {
        tfm_ns_mailbox_hal_wait_reply(handle);

        /*
         * Woken up from sleep.
         * Check the completion flag to make sure that the current thread was
         * woken up by the reply event, rather than by other events.
         */
        tfm_ns_mailbox_hal_enter_critical();
        /*
         * Accessing the NS mailbox flags requires an SVCall if the NS mailbox
         * is put in privileged mode.
         * An alternative is to let each NS thread allocate its own is_woken
         * flag, but a spinlock-like mechanism would still be required.
         */
#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
        if (is_queue_slot_woken(idx)) {
            clear_queue_slot_woken(idx);
            break;
        }
#else
        if (is_queue_slot_replied(idx)) {
            clear_queue_slot_replied(idx);
            break;
        }
#endif
        tfm_ns_mailbox_hal_exit_critical();
    }

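    /*
     * The break paths above leave the loop with the critical section still
     * held, so release it here.
     */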
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}