/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>

#include "cmsis_compiler.h"
#include "tfm_ns_mailbox.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

static int32_t mailbox_wait_reply(uint8_t idx);

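/*
 * Helpers to manipulate the slot status bitmasks (empty, pend, replied) and
 * the per-slot woken flag in the NSPE mailbox queue. Out-of-range slot
 * indexes are silently ignored.
 */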
static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1 << idx);
    }
}

static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1 << idx);
    }
}

static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1 << idx);
    }
}

static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1 << idx);
    }
}

static inline void clear_queue_slot_all_replied(mailbox_queue_status_t status)
{
    mailbox_queue_ptr->replied_slots &= ~status;
}

static inline bool is_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->replied_slots & (1UL << idx);
    }

    return false;
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

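/*
 * Pick the first empty slot in the queue, mark it as used and return its
 * index. The empty-slot bitmask is read and updated inside the platform
 * critical section so that concurrent callers cannot claim the same slot.
 * Returns NUM_MAILBOX_QUEUE_SLOT if no empty slot is available.
 */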
static uint8_t acquire_empty_slot(struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_hal_enter_critical();
    status = queue->empty_slots;

    if (!status) {
        /* No empty slot */
        tfm_ns_mailbox_hal_exit_critical();
        return NUM_MAILBOX_QUEUE_SLOT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1 << idx)) {
            break;
        }
    }

    clear_queue_slot_empty(idx);

    tfm_ns_mailbox_hal_exit_critical();

    return idx;
}

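/* Record the handle of the task that owns the message in the given slot. */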
static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

#ifdef TFM_MULTI_CORE_TEST
/*
 * When the NSPE mailbox only covers a single non-secure core, the spinlock
 * only needs to disable IRQs.
 */
static inline void ns_mailbox_spin_lock(void)
{
    __disable_irq();
}

static inline void ns_mailbox_spin_unlock(void)
{
    __enable_irq();
}

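/* Reset the statistics counters of mailbox transmissions. */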
void tfm_ns_mailbox_tx_stats_init(void)
{
    if (!mailbox_queue_ptr) {
        return;
    }

    mailbox_queue_ptr->nr_tx = 0;
    mailbox_queue_ptr->nr_used_slots = 0;
}

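/*
 * Count how many slots are already in use when this transmission arrives and
 * accumulate that number, together with the transmission count, for the
 * average-usage statistics.
 */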
static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
{
    mailbox_queue_status_t empty_status;
    uint8_t idx, nr_empty = 0;

    if (!ns_queue) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();
    /* Count the number of used slots when this tx arrives */
    empty_status = ns_queue->empty_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (empty_status) {
        for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
            if (empty_status & (0x1UL << idx)) {
                nr_empty++;
            }
        }
    }

    ns_mailbox_spin_lock();
    ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
    ns_queue->nr_tx++;
    ns_mailbox_spin_unlock();
}

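/*
 * Report the average number of used slots per transmission as an integer
 * part plus tenths.
 */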
void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
{
    uint32_t nr_used_slots, nr_tx;

    if (!mailbox_queue_ptr || !stats_res) {
        return;
    }

    nr_used_slots = mailbox_queue_ptr->nr_used_slots;
    nr_tx = mailbox_queue_ptr->nr_tx;

    /* Avoid a division by zero before any transmission has been recorded. */
    if (!nr_tx) {
        stats_res->avg_nr_slots = 0;
        stats_res->avg_nr_slots_tenths = 0;
        return;
    }

    stats_res->avg_nr_slots = nr_used_slots / nr_tx;
    nr_used_slots %= nr_tx;
    stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
#endif

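/*
 * Deliver a PSA client call to SPE: acquire an empty queue slot, fill in the
 * mailbox message, record the calling task as the slot owner, flag the slot
 * as pending and notify the SPE mailbox. On success the acquired slot index
 * is returned through slot_idx.
 */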
static int32_t mailbox_tx_client_req(uint32_t call_type,
                                     const struct psa_client_params_t *params,
                                     int32_t client_id,
                                     uint8_t *slot_idx)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    const void *task_handle;

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    mailbox_tx_stats_update(mailbox_queue_ptr);
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_os_get_task_handle();
    set_msg_owner(idx, task_handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

    tfm_ns_mailbox_hal_notify_peer();

    *slot_idx = idx;

    return MAILBOX_SUCCESS;
}

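/*
 * Collect the return value of a completed call from the queue slot, clear
 * the owner field and release the slot back to the empty pool.
 */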
static int32_t mailbox_rx_client_reply(uint8_t idx, int32_t *reply)
{
    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear up the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_hal_enter_critical();
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags
     * are re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}

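/*
 * Entry point for NSPE clients: serialize access with the OS lock, send the
 * request to SPE, block until the reply arrives and then pass the return
 * value back to the caller.
 */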
int32_t tfm_ns_mailbox_client_call(uint32_t call_type,
                                   const struct psa_client_params_t *params,
                                   int32_t client_id,
                                   int32_t *reply)
{
    uint8_t slot_idx = NUM_MAILBOX_QUEUE_SLOT;
    int32_t reply_buf = 0x0;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    if (!params || !reply) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (tfm_ns_mailbox_os_lock_acquire() != MAILBOX_SUCCESS) {
        return MAILBOX_QUEUE_FULL;
    }

    /* An SVCall is required if the NS mailbox is put in privileged mode. */
    ret = mailbox_tx_client_req(call_type, params, client_id, &slot_idx);
    if (ret != MAILBOX_SUCCESS) {
        goto exit;
    }

    mailbox_wait_reply(slot_idx);

    /* An SVCall is required if the NS mailbox is put in privileged mode. */
    ret = mailbox_rx_client_reply(slot_idx, &reply_buf);
    if (ret == MAILBOX_SUCCESS) {
        *reply = reply_buf;
    }

exit:
    if (tfm_ns_mailbox_os_lock_release() != MAILBOX_SUCCESS) {
        return MAILBOX_GENERIC_ERROR;
    }

    return ret;
}

#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
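/*
 * Called from the mailbox IRQ handler: take a snapshot of the replied slots,
 * clear them, and wake up the owner task of each slot that has received a
 * reply from SPE.
 */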
int32_t tfm_ns_mailbox_wake_reply_owner_isr(void)
{
    uint8_t idx;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INIT_ERROR;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    clear_queue_slot_all_replied(replied_status);
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_NO_PEND_EVENT;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /*
         * Process the slots whose replies have already been received from
         * the SPE mailbox but whose wake-up signals have not been sent yet.
         */
        if (!(replied_status & (0x1UL << idx))) {
            continue;
        }

        /* Set woken-up flag */
        tfm_ns_mailbox_hal_enter_critical_isr();
        set_queue_slot_woken(idx);
        tfm_ns_mailbox_hal_exit_critical_isr();

        tfm_ns_mailbox_os_wake_task_isr(mailbox_queue_ptr->queue[idx].owner);

        replied_status &= ~(0x1UL << idx);
        if (!replied_status) {
            break;
        }
    }

    return MAILBOX_SUCCESS;
}
#endif

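/*
 * Initialize the NSPE mailbox: zero the queue, mark all slots as empty,
 * perform the platform specific HAL initialization and set up the OS lock
 * that serializes client calls.
 */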
int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of mailbox queue address may be required according
     * to non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /*
     * Initialize the empty bitmask. The mask is built in two steps so that
     * the shift amount never reaches NUM_MAILBOX_QUEUE_SLOT, which may equal
     * the bit width of mailbox_queue_status_t.
     */
    queue->empty_slots =
        (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
        (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    ret = tfm_ns_mailbox_os_lock_init();

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init();
#endif

    return ret;
}

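/*
 * Block the calling task until the reply of the message in the given slot
 * arrives. The task sleeps in tfm_ns_mailbox_os_wait_reply() and, after each
 * wake-up, checks the slot status to filter out spurious wake-up events.
 */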
static int32_t mailbox_wait_reply(uint8_t idx)
{
    while (1) {
        tfm_ns_mailbox_os_wait_reply();

        /*
         * Woken up from sleep.
         * Check the completed flag to make sure that the current thread is
         * woken up by the reply event, rather than by other events.
         */
        tfm_ns_mailbox_hal_enter_critical();
        /*
         * An SVCall is required to access the NS mailbox flags if the NS
         * mailbox is put in privileged mode.
         * An alternative is to let the NS thread allocate its own is_woken
         * flag, but a spinlock-like mechanism would still be required.
         */
#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
        if (is_queue_slot_woken(idx)) {
            clear_queue_slot_woken(idx);
            break;
        }
#else
        if (is_queue_slot_replied(idx)) {
            clear_queue_slot_replied(idx);
            break;
        }
#endif
        tfm_ns_mailbox_hal_exit_critical();
    }

    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}