/*
 * Copyright (c) 2019-2020, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <string.h>

#include "cmsis_compiler.h"
#include "tfm_ns_mailbox.h"
#include "tfm_plat_ns.h"

/* The pointer to NSPE mailbox queue */
static struct ns_mailbox_queue_t *mailbox_queue_ptr = NULL;

static inline void clear_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots &= ~(1 << idx);
    }
}

static inline void set_queue_slot_empty(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->empty_slots |= (1 << idx);
    }
}

static inline void set_queue_slot_pend(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->pend_slots |= (1 << idx);
    }
}

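/*
 * Convert between a queue slot index and a message handle.
 * The +1 offset ensures that a valid handle is never 0 and therefore cannot
 * collide with MAILBOX_MSG_NULL_HANDLE.
 */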
static inline int32_t get_mailbox_msg_handle(uint8_t idx,
                                             mailbox_msg_handle_t *handle)
{
    if ((idx >= NUM_MAILBOX_QUEUE_SLOT) || !handle) {
        return MAILBOX_INVAL_PARAMS;
    }

    *handle = (mailbox_msg_handle_t)(idx + 1);

    return MAILBOX_SUCCESS;
}

static inline int32_t get_mailbox_msg_idx(mailbox_msg_handle_t handle,
                                          uint8_t *idx)
{
    if ((handle == MAILBOX_MSG_NULL_HANDLE) || !idx) {
        return MAILBOX_INVAL_PARAMS;
    }

    *idx = (uint8_t)(handle - 1);

    return MAILBOX_SUCCESS;
}

static inline void clear_queue_slot_replied(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->replied_slots &= ~(1 << idx);
    }
}

static inline void set_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = true;
    }
}

static inline bool is_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].is_woken;
    }

    return false;
}

static inline void clear_queue_slot_woken(uint8_t idx)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].is_woken = false;
    }
}

static uint8_t acquire_empty_slot(const struct ns_mailbox_queue_t *queue)
{
    uint8_t idx;
    mailbox_queue_status_t status;

    tfm_ns_mailbox_hal_enter_critical();
    status = queue->empty_slots;

    if (!status) {
        /* No empty slot */
        tfm_ns_mailbox_hal_exit_critical();
        return NUM_MAILBOX_QUEUE_SLOT;
    }

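    /* Find the first empty slot, i.e. the lowest set bit in the bitmask */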
    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        if (status & (1 << idx)) {
            break;
        }
    }

    clear_queue_slot_empty(idx);

    tfm_ns_mailbox_hal_exit_critical();

    return idx;
}

static void set_msg_owner(uint8_t idx, const void *owner)
{
    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        mailbox_queue_ptr->queue[idx].owner = owner;
    }
}

#ifdef TFM_MULTI_CORE_TEST
/*
 * When the NSPE mailbox only covers a single non-secure core, the spinlock
 * only needs to disable IRQs.
 */
static inline void ns_mailbox_spin_lock(void)
{
    __disable_irq();
}

static inline void ns_mailbox_spin_unlock(void)
{
    __enable_irq();
}

void tfm_ns_mailbox_tx_stats_init(void)
{
    if (!mailbox_queue_ptr) {
        return;
    }

    mailbox_queue_ptr->nr_tx = 0;
    mailbox_queue_ptr->nr_used_slots = 0;
}

static void mailbox_tx_stats_update(struct ns_mailbox_queue_t *ns_queue)
{
    mailbox_queue_status_t empty_status;
    uint8_t idx, nr_empty = 0;

    if (!ns_queue) {
        return;
    }

    tfm_ns_mailbox_hal_enter_critical();
    /* Count the number of used slots when this tx arrives */
    empty_status = ns_queue->empty_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (empty_status) {
        for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
            if (empty_status & (0x1UL << idx)) {
                nr_empty++;
            }
        }
    }

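    /*
     * Update the statistics under the spinlock so that concurrent callers
     * cannot corrupt the counters.
     */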
    ns_mailbox_spin_lock();
    ns_queue->nr_used_slots += (NUM_MAILBOX_QUEUE_SLOT - nr_empty);
    ns_queue->nr_tx++;
    ns_mailbox_spin_unlock();
}

void tfm_ns_mailbox_stats_avg_slot(struct ns_mailbox_stats_res_t *stats_res)
{
    uint32_t nr_used_slots, nr_tx;

    if (!mailbox_queue_ptr || !stats_res) {
        return;
    }

    nr_used_slots = mailbox_queue_ptr->nr_used_slots;
    nr_tx = mailbox_queue_ptr->nr_tx;

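    /*
     * Report the average as an integer part plus tenths so that no
     * floating-point arithmetic is required.
     */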
    stats_res->avg_nr_slots = nr_used_slots / nr_tx;
    nr_used_slots %= nr_tx;
    stats_res->avg_nr_slots_tenths = nr_used_slots * 10 / nr_tx;
}
#endif

mailbox_msg_handle_t tfm_ns_mailbox_tx_client_req(uint32_t call_type,
                                       const struct psa_client_params_t *params,
                                                   int32_t client_id)
{
    uint8_t idx;
    struct mailbox_msg_t *msg_ptr;
    mailbox_msg_handle_t handle;
    const void *task_handle;

    if (!mailbox_queue_ptr) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    if (!params) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    idx = acquire_empty_slot(mailbox_queue_ptr);
    if (idx >= NUM_MAILBOX_QUEUE_SLOT) {
        return MAILBOX_QUEUE_FULL;
    }

#ifdef TFM_MULTI_CORE_TEST
    mailbox_tx_stats_update(mailbox_queue_ptr);
#endif

    /* Fill the mailbox message */
    msg_ptr = &mailbox_queue_ptr->queue[idx].msg;

    msg_ptr->call_type = call_type;
    memcpy(&msg_ptr->params, params, sizeof(msg_ptr->params));
    msg_ptr->client_id = client_id;

    /*
     * Fetch the current task handle. The task will be woken up according to
     * the handle value set in the owner field.
     */
    task_handle = tfm_ns_mailbox_get_task_handle();
    set_msg_owner(idx, task_handle);

    get_mailbox_msg_handle(idx, &handle);

    tfm_ns_mailbox_hal_enter_critical();
    set_queue_slot_pend(idx);
    tfm_ns_mailbox_hal_exit_critical();

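    /* Notify the SPE peer that a new request is pending in the queue */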
    tfm_ns_mailbox_hal_notify_peer();

    return handle;
}

int32_t tfm_ns_mailbox_rx_client_reply(mailbox_msg_handle_t handle,
                                       int32_t *reply)
{
    uint8_t idx;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INVAL_PARAMS;
    }

    if ((handle == MAILBOX_MSG_NULL_HANDLE) || (!reply)) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

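    /* Copy the return value of the completed call back to the caller */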
    *reply = mailbox_queue_ptr->queue[idx].reply.return_val;

    /* Clear up the owner field */
    set_msg_owner(idx, NULL);

    tfm_ns_mailbox_hal_enter_critical();
    clear_queue_slot_replied(idx);
    clear_queue_slot_woken(idx);
    /*
     * Make sure that the empty flag is set after all the other status flags are
     * re-initialized.
     */
    set_queue_slot_empty(idx);
    tfm_ns_mailbox_hal_exit_critical();

    return MAILBOX_SUCCESS;
}

bool tfm_ns_mailbox_is_msg_replied(mailbox_msg_handle_t handle)
{
    uint8_t idx;
    int32_t ret;
    mailbox_queue_status_t status;

    if (!mailbox_queue_ptr) {
        return false;
    }

    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        return false;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return false;
    }

    tfm_ns_mailbox_hal_enter_critical();
    status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical();

    if (status & (1 << idx)) {
        return true;
    }

    return false;
}

mailbox_msg_handle_t tfm_ns_mailbox_fetch_reply_msg_isr(void)
{
    uint8_t idx;
    mailbox_msg_handle_t handle;
    mailbox_queue_status_t replied_status;

    if (!mailbox_queue_ptr) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    tfm_ns_mailbox_hal_enter_critical_isr();
    replied_status = mailbox_queue_ptr->replied_slots;
    tfm_ns_mailbox_hal_exit_critical_isr();

    if (!replied_status) {
        return MAILBOX_MSG_NULL_HANDLE;
    }

    for (idx = 0; idx < NUM_MAILBOX_QUEUE_SLOT; idx++) {
        /* Find the first replied message in queue */
        if (replied_status & (0x1UL << idx)) {
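            /*
             * Mark the slot as woken and clear its replied flag so that the
             * same reply is not fetched again.
             */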
            tfm_ns_mailbox_hal_enter_critical_isr();
            clear_queue_slot_replied(idx);
            set_queue_slot_woken(idx);
            tfm_ns_mailbox_hal_exit_critical_isr();

            if (get_mailbox_msg_handle(idx, &handle) == MAILBOX_SUCCESS) {
                return handle;
            }
        }
    }

    return MAILBOX_MSG_NULL_HANDLE;
}

const void *tfm_ns_mailbox_get_msg_owner(mailbox_msg_handle_t handle)
{
    uint8_t idx;

    if (get_mailbox_msg_idx(handle, &idx) != MAILBOX_SUCCESS) {
        return NULL;
    }

    if (idx < NUM_MAILBOX_QUEUE_SLOT) {
        return mailbox_queue_ptr->queue[idx].owner;
    }

    return NULL;
}

int32_t tfm_ns_mailbox_init(struct ns_mailbox_queue_t *queue)
{
    int32_t ret;

    if (!queue) {
        return MAILBOX_INVAL_PARAMS;
    }

    /*
     * Further verification of mailbox queue address may be required according
     * to non-secure memory assignment.
     */

    memset(queue, 0, sizeof(*queue));

    /* Initialize empty bitmask */
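    /*
     * The mask is built in two steps so that the shift count stays below the
     * width of mailbox_queue_status_t even when NUM_MAILBOX_QUEUE_SLOT equals
     * that width.
     */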
    queue->empty_slots =
            (mailbox_queue_status_t)((1UL << (NUM_MAILBOX_QUEUE_SLOT - 1)) - 1);
    queue->empty_slots +=
            (mailbox_queue_status_t)(1UL << (NUM_MAILBOX_QUEUE_SLOT - 1));

    mailbox_queue_ptr = queue;

    /* Platform specific initialization. */
    ret = tfm_ns_mailbox_hal_init(queue);

#ifdef TFM_MULTI_CORE_TEST
    tfm_ns_mailbox_tx_stats_init();
#endif

    return ret;
}

#ifdef TFM_MULTI_CORE_MULTI_CLIENT_CALL
int32_t tfm_ns_mailbox_wait_reply(mailbox_msg_handle_t handle)
{
    uint8_t idx;
    int32_t ret;

    if (!mailbox_queue_ptr) {
        return MAILBOX_INVAL_PARAMS;
    }

    if (handle == MAILBOX_MSG_NULL_HANDLE) {
        return MAILBOX_INVAL_PARAMS;
    }

    ret = get_mailbox_msg_idx(handle, &idx);
    if (ret != MAILBOX_SUCCESS) {
        return ret;
    }

    while (1) {
        tfm_ns_mailbox_hal_wait_reply(handle);

        /*
         * Woken up from sleep.
         * Check the woken flag to make sure that the current thread was
         * woken up by the reply event, rather than by other events.
         */
        tfm_ns_mailbox_hal_enter_critical();
        if (is_queue_slot_woken(idx)) {
            tfm_ns_mailbox_hal_exit_critical();
            break;
        }
        tfm_ns_mailbox_hal_exit_critical();
    }

    return MAILBOX_SUCCESS;
}
#endif