/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>

#include "cmsis_compiler.h"
#include "tfm_ns_mailbox.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

static int32_t mailbox_wait_reply(uint8_t idx);

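/*
 * Each bit of the empty_slots, pend_slots and replied_slots bitmasks in the
 * NSPE mailbox queue corresponds to one queue slot. The helpers below update
 * the status of a single slot with a read-modify-write on the relevant
 * bitmask.
 */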
static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1 << idx);
    }
}

static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1 << idx);
    }
}

static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1 << idx);
    }
}

static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1 << idx);
    }
}

static inline void clear_queue_slot_all_replied(mailbox_queue_status_t status)
{
    mailbox_queue_ptr->replied_slots &= ~status;
}

static inline bool is_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->replied_slots & (1UL << idx);
    }

    return false;
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
/*
 * When the NSPE mailbox only covers a single non-secure core, the spinlock
 * only needs to disable IRQs.
 */
static inline void ns_mailbox_spin_lock(void)
{
    __disable_irq();
}

/*
 * It is assumed that IRQs are always enabled when the spinlock is acquired.
 * Otherwise, the waiting thread won't be woken up.
 */
static inline void ns_mailbox_spin_unlock(void)
{
    __enable_irq();
}
#else /* TFM_MULTI_CORE_MULTI_CLIENT_CALL */
/*
 * The local spinlock is implemented as a dummy one when the multiple PSA
 * client call feature is disabled, since interrupts are not required in the
 * NS mailbox.
 */
#define ns_mailbox_spin_lock() do {} while (0)

#define ns_mailbox_spin_unlock() do {} while (0)
#endif /* TFM_MULTI_CORE_MULTI_CLIENT_CALL */

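/*
 * Claim the first empty slot in the mailbox queue.
 * Returns the slot index on success, or NUM_MAILBOX_QUEUE_SLOT if no slot is
 * currently empty.
 */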
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    ns_mailbox_spin_lock();
    status = queue->empty_slots;
    ns_mailbox_spin_unlock();

    if (!status) {
        /* No empty slot */
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1 << idx)) {
            break;
        }
    }

    ns_mailbox_spin_lock();
    clear_queue_slot_empty(idx);
    ns_mailbox_spin_unlock();

    return idx;
}

static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

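/*
 * Mailbox TX statistics, compiled in for the multi-core tests only: record
 * the number of transmissions and the number of occupied slots observed at
 * each transmission, so that the average slot usage can be reported.
 */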
#ifdef TFM_MULTI_CORE_TEST
void tfm_ns_mailbox_tx_stats_init(void)
{
    if (!mailbox_queue_ptr) {
        return;
    }

    mailbox_queue_ptr->nr_tx = 0;
    mailbox_queue_ptr->nr_used_slots = 0;
}

static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
{
    mailbox_queue_status_t empty_status;
    uint8_t idx, nr_empty = 0;

    if (!ns_queue) {
        return;
    }

    ns_mailbox_spin_lock();
    /* Count the number of used slots when this tx arrives */
    empty_status = ns_queue->empty_slots;
    ns_mailbox_spin_unlock();

    if (empty_status) {
        for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
            if (empty_status & (0x1UL << idx)) {
                nr_empty++;
            }
        }
    }

    ns_mailbox_spin_lock();
    ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
    ns_queue->nr_tx++;
    ns_mailbox_spin_unlock();
}

void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
{
    uint32_t nr_used_slots, nr_tx;

    if (!mailbox_queue_ptr || !stats_res) {
        return;
    }

    nr_used_slots = mailbox_queue_ptr->nr_used_slots;
    nr_tx = mailbox_queue_ptr->nr_tx;

    /* Avoid division by zero if no transmission has been recorded yet */
    if (!nr_tx) {
        return;
    }

    stats_res->avg_nr_slots = nr_used_slots / nr_tx;
    nr_used_slots %= nr_tx;
    stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
#endif

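/*
 * Deliver a PSA client call request to SPE:
 * acquire an empty queue slot, fill in the mailbox message (call type,
 * parameters and client ID), record the calling task handle as the slot
 * owner, mark the slot as pending and notify SPE through the HAL.
 * The acquired slot index is returned via slot_idx.
 */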
static int32_t mailbox_tx_client_req(uint32_t call_type,
                                     const struct psa_client_params_t *params,
                                     int32_t client_id,
                                     uint8_t *slot_idx)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    const void *task_handle;

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    mailbox_tx_stats_update(mailbox_queue_ptr);
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_os_get_task_handle();
    set_msg_owner(idx, task_handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    *slot_idx = idx;

    return MAILBOX_SUCCESS;
}

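/*
 * Fetch the return value written by SPE into the given slot, then release the
 * slot by clearing its owner and status flags and marking it empty again.
 */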
static int32_t mailbox_rx_client_reply(uint8_t idx, int32_t *reply)
{
    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear up the owner field */
    set_msg_owner(idx, NULL);

    ns_mailbox_spin_lock();
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    ns_mailbox_spin_unlock();

    return MAILBOX_SUCCESS;
}

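/*
 * Send a PSA client call request to SPE via the mailbox and wait for the
 * reply. An illustrative usage sketch follows; it is not part of the original
 * file, and the MAILBOX_PSA_CALL call type as well as the way the
 * psa_client_params_t structure is filled are assumptions here:
 *
 *     struct psa_client_params_t params;
 *     int32_t reply;
 *
 *     ... fill params with the parameters of the PSA call ...
 *
 *     if (tfm_ns_mailbox_client_call(MAILBOX_PSA_CALL, &params,
 *                                    client_id, &reply) != MAILBOX_SUCCESS) {
 *         ... handle the mailbox transport error, which is distinct from
 *             the PSA status returned in reply ...
 *     }
 */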
int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
                                   const struct psa_client_params_t *params,
                                   int32_t client_id,
                                   int32_t *reply)
{
    uint8_t slot_idx = NUM_MAILBOX_QUEUE_SLOT;
    int32_t reply_buf = 0x0;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    if (!params || !reply) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (tfm_ns_mailbox_os_lock_acquire() != MAILBOX_SUCCESS) {
        return MAILBOX_QUEUE_FULL;
    }

    /* An SVCall is required if the NS mailbox is put in privileged mode. */
    ret = mailbox_tx_client_req(call_type, params, client_id, &slot_idx);
    if (ret != MAILBOX_SUCCESS) {
        goto exit;
    }

    mailbox_wait_reply(slot_idx);

    /* An SVCall is required if the NS mailbox is put in privileged mode. */
    ret = mailbox_rx_client_reply(slot_idx, &reply_buf);
    if (ret == MAILBOX_SUCCESS) {
        *reply = reply_buf;
    }

exit:
    if (tfm_ns_mailbox_os_lock_release() != MAILBOX_SUCCESS) {
        return MAILBOX_GENERIC_ERROR;
    }

    return ret;
}

#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
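/*
 * Wake up the owner tasks of all replied mailbox slots. Intended to be called
 * from the interrupt handler that receives the reply notification from SPE.
 * Returns MAILBOX_NO_PEND_EVENT when no reply is pending.
 */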
int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
{
    uint8_t idx;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    clear_queue_slot_all_replied(replied_status);
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /*
         * The reply has already been received from the SPE mailbox but the
         * wake-up signal has not been sent yet.
         */
        if (!(replied_status & (0x1UL << idx))) {
            continue;
        }

        /* Set woken-up flag */
        tfm_ns_mailbox_hal_enter_critical_isr();
        set_queue_slot_woken(idx);
        tfm_ns_mailbox_hal_exit_critical_isr();

        tfm_ns_mailbox_os_wake_task_isr(mailbox_queue_ptr->queue[idx].owner);

        replied_status &= ~(0x1UL << idx);
        if (!replied_status) {
            break;
        }
    }

    return MAILBOX_SUCCESS;
}

static inline bool mailbox_wait_reply_signal(uint8_t idx)
{
    bool is_set = false;

    ns_mailbox_spin_lock();

    if (is_queue_slot_woken(idx)) {
        clear_queue_slot_woken(idx);
        is_set = true;
    }

    ns_mailbox_spin_unlock();

    return is_set;
}
#else /* TFM_MULTI_CORE_MULTI_CLIENT_CALL */
static inline bool mailbox_wait_reply_signal(uint8_t idx)
{
    bool is_set = false;

    tfm_ns_mailbox_hal_enter_critical();

    if (is_queue_slot_replied(idx)) {
        clear_queue_slot_replied(idx);
        is_set = true;
    }

    tfm_ns_mailbox_hal_exit_critical();

    return is_set;
}
#endif /* TFM_MULTI_CORE_MULTI_CLIENT_CALL */

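/*
 * Block the current task until the reply to the message in the given slot
 * arrives. Spurious wake-ups are filtered out by re-checking the reply signal
 * in a loop.
 */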
static int32_t mailbox_wait_reply(uint8_t idx)
{
    bool is_replied;

    while (1) {
        tfm_ns_mailbox_os_wait_reply();

        /*
         * Woken up from sleep.
         * Check the completed flag to make sure that the current thread was
         * woken up by the reply event, rather than by other events.
         */
        /*
         * An SVCall is required to access the NS mailbox flags if the NS
         * mailbox is put in privileged mode.
         * An alternative is to let the NS thread allocate its own is_woken
         * flag, but a spinlock-like mechanism is still required.
         */
        is_replied = mailbox_wait_reply_signal(idx);
        if (is_replied) {
            break;
        }
    }

    return MAILBOX_SUCCESS;
}

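/*
 * Initialize the NS mailbox: zero the shared queue, mark every slot as empty,
 * run the platform-specific HAL initialization and create the OS lock used to
 * serialize client calls.
 */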
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of the mailbox queue address may be required
     * according to the non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /*
     * Initialize the empty bitmask in two steps: set the lower
     * (NUM_MAILBOX_QUEUE_SLOT - 1) bits first, then add the top bit, so that
     * no single shift by NUM_MAILBOX_QUEUE_SLOT bits is needed.
     */
    queue->empty_slots =
        (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
        (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform-specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    ret = tfm_ns_mailbox_os_lock_init();

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init();
#endif

    return ret;
}