/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>

#include "cmsis_compiler.h"
#include "tfm_ns_mailbox.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

static int32_t mailbox_wait_reply(uint8_t idx);

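/*
 * Slot status tracking:
 * - empty_slots:   bitmask of slots free for allocation.
 * - pend_slots:    bitmask of slots holding requests pending for SPE handling.
 * - replied_slots: bitmask of slots whose replies have been returned by SPE.
 * - queue[idx].is_woken: per-slot flag marking that the owner task has been
 *   woken up to collect the reply.
 * Callers typically wrap these helpers in the platform critical section
 * (tfm_ns_mailbox_hal_enter/exit_critical or the *_isr variants).
 */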
static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1 << idx);
    }
}

static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1 << idx);
    }
}

static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1 << idx);
    }
}

static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1 << idx);
    }
}

static inline bool is_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->replied_slots & (1UL << idx);
    }

    return false;
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

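/*
 * Scan empty_slots inside the critical section and claim the first free slot
 * by clearing its empty bit. Returns the slot index, or NUM_MAILBOX_QUEUE_SLOT
 * if the queue is currently full.
 */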
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_hal_enter_critical();
    status = queue->empty_slots;

    if (!status) {
        /* No empty slot */
        tfm_ns_mailbox_hal_exit_critical();
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1 << idx)) {
            break;
        }
    }

    clear_queue_slot_empty(idx);

    tfm_ns_mailbox_hal_exit_critical();

    return idx;
}

static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

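/*
 * The statistics code below is only built for the multi-core test
 * configuration (TFM_MULTI_CORE_TEST). It records how many slots are occupied
 * each time a request is transmitted, so that the average slot usage can be
 * reported.
 */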
#ifdef TFM_MULTI_CORE_TEST
/*
 * When the NSPE mailbox only covers a single non-secure core, the spinlock
 * only needs to disable IRQs.
 */
static inline void ns_mailbox_spin_lock(void)
{
    __disable_irq();
}

static inline void ns_mailbox_spin_unlock(void)
{
    __enable_irq();
}

void tfm_ns_mailbox_tx_stats_init(void)
{
    if (!mailbox_queue_ptr) {
        return;
    }

    mailbox_queue_ptr->nr_tx = 0;
    mailbox_queue_ptr->nr_used_slots = 0;
}

static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
{
    mailbox_queue_status_t empty_status;
    uint8_t idx, nr_empty = 0;

    if (!ns_queue) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();
    /* Count the number of used slots when this tx arrives */
    empty_status = ns_queue->empty_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (empty_status) {
        for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
            if (empty_status & (0x1UL << idx)) {
                nr_empty++;
            }
        }
    }

    ns_mailbox_spin_lock();
    ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
    ns_queue->nr_tx++;
    ns_mailbox_spin_unlock();
}

void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
{
    uint32_t nr_used_slots, nr_tx;

    if (!mailbox_queue_ptr || !stats_res) {
        return;
    }

    nr_used_slots = mailbox_queue_ptr->nr_used_slots;
    nr_tx = mailbox_queue_ptr->nr_tx;

    stats_res->avg_nr_slots = nr_used_slots / nr_tx;
    nr_used_slots %= nr_tx;
    stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
#endif

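/*
 * Acquire a free slot, fill in the PSA client call message, record the calling
 * task as the slot owner, mark the slot as pending and notify the SPE. On
 * success the allocated slot index is returned through slot_idx.
 */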
static int32_t mailbox_tx_client_req(uint32_t call_type,
                                     const struct psa_client_params_t *params,
                                     int32_t client_id,
                                     uint8_t *slot_idx)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    const void *task_handle;

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    mailbox_tx_stats_update(mailbox_queue_ptr);
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_os_get_task_handle();
    set_msg_owner(idx, task_handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    *slot_idx = idx;

    return MAILBOX_SUCCESS;
}

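/*
 * Fetch the return value written by SPE into the slot, clear the owner and
 * status flags, and release the slot back to the empty pool.
 */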
static int32_t mailbox_rx_client_reply(uint8_t idx, int32_t *reply)
{
    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear up the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_hal_enter_critical();
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}

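/*
 * Blocking PSA client call over the mailbox: serialize callers with the OS
 * lock, transmit the request, sleep until the reply arrives and then read the
 * return value back. As noted inline, the TX/RX steps require SVCall if the
 * NS mailbox runs in privileged mode.
 */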
int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
                                   const struct psa_client_params_t *params,
                                   int32_t client_id,
                                   int32_t *reply)
{
    uint8_t slot_idx = NUM_MAILBOX_QUEUE_SLOT;
    int32_t reply_buf = 0x0;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    if (!params || !reply) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (tfm_ns_mailbox_os_lock_acquire() != MAILBOX_SUCCESS) {
        return MAILBOX_QUEUE_FULL;
    }

    /* It requires SVCall if NS mailbox is put in privileged mode. */
    ret = mailbox_tx_client_req(call_type, params, client_id, &slot_idx);
    if (ret != MAILBOX_SUCCESS) {
        goto exit;
    }

    mailbox_wait_reply(slot_idx);

    /* It requires SVCall if NS mailbox is put in privileged mode. */
    ret = mailbox_rx_client_reply(slot_idx, &reply_buf);
    if (ret == MAILBOX_SUCCESS) {
        *reply = reply_buf;
    }

exit:
    if (tfm_ns_mailbox_os_lock_release() != MAILBOX_SUCCESS) {
        return MAILBOX_GENERIC_ERROR;
    }

    return ret;
}

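/*
 * Called from the mailbox interrupt context when multiple outstanding client
 * calls are supported: find the first replied slot, turn its "replied" state
 * into "woken" and wake up the owner task so it can collect the reply.
 */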
#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
{
    uint8_t idx;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /* Find the first replied message in queue */
        if (replied_status & (0x1UL << idx)) {
            tfm_ns_mailbox_hal_enter_critical_isr();
            clear_queue_slot_replied(idx);
            set_queue_slot_woken(idx);
            tfm_ns_mailbox_hal_exit_critical_isr();

            break;
        }
    }

    /* In theory, it won't occur. Just in case */
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_NO_PEND_EVENT;
    }

    tfm_ns_mailbox_os_wake_task_isr(mailbox_queue_ptr->queue[idx].owner);

    return MAILBOX_SUCCESS;
}
#endif

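/*
 * Initialize the shared mailbox queue and the platform/OS specific parts.
 * The empty_slots bitmask is built in two steps, presumably so that the shift
 * never reaches the full width of mailbox_queue_status_t when
 * NUM_MAILBOX_QUEUE_SLOT equals the number of bits in that type.
 */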
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of mailbox queue address may be required according
     * to non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /* Initialize empty bitmask */
    queue->empty_slots =
        (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
        (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    ret = tfm_ns_mailbox_os_lock_init();

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init();
#endif

    return ret;
}

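/*
 * Block the calling task until its slot is flagged as woken (multi-client
 * build) or replied (single-client build). The flag is re-checked after every
 * wake-up so that wake events unrelated to the reply are ignored.
 */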
static int32_t mailbox_wait_reply(uint8_t idx)
{
    while (1) {
        tfm_ns_mailbox_os_wait_reply();

        /*
         * Woken up from sleep.
         * Check the completed flag to make sure that the current thread is
         * woken up by the reply event, rather than by other events.
         */
        tfm_ns_mailbox_hal_enter_critical();
        /*
         * It requires SVCall to access NS mailbox flags if NS mailbox is put
         * in privileged mode.
         * An alternative is to let the NS thread allocate its own is_woken
         * flag. But a spinlock-like mechanism is still required.
         */
#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
        if (is_queue_slot_woken(idx)) {
            clear_queue_slot_woken(idx);
            break;
        }
#else
        if (is_queue_slot_replied(idx)) {
            clear_queue_slot_replied(idx);
            break;
        }
#endif
        tfm_ns_mailbox_hal_exit_critical();
    }

    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}