/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "thread.h"
#include "tfm_arch.h"
#include "utilities.h"

/* Pointer to the current thread. */
struct thread_t *p_curr_thrd;

/* Force ZERO in case the ZI (bss) clear is missing. */
static struct thread_t *p_thrd_head = NULL; /* Points to the first thread. */
static struct thread_t *p_rnbl_head = NULL; /* Points to the first runnable. */

/* Macros that fetch the globals, to support future expansion (e.g. PERCPU). */
#define LIST_HEAD p_thrd_head
#define RNBL_HEAD p_rnbl_head

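/*
 * Return the next thread to run: the first RUNNABLE entry reachable from
 * RNBL_HEAD, or NULL when no thread is runnable.
 */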
struct thread_t *thrd_next(void)
{
    struct thread_t *p_thrd = RNBL_HEAD;

    /*
     * First runnable thread has highest priority since threads are
     * sorted by priority.
     */
    while (p_thrd && p_thrd->state != THRD_STATE_RUNNABLE) {
        p_thrd = p_thrd->next;
    }

    return p_thrd;
}

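/*
 * Insert a node into the thread list sorted by priority. A lower numerical
 * value means a higher priority, so the node is placed in front of the first
 * entry whose priority value is not smaller than its own.
 */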
static void insert_by_prior(struct thread_t **head, struct thread_t *node)
{
    if (*head == NULL || (node->priority <= (*head)->priority)) {
        node->next = *head;
        *head = node;
    } else {
        struct thread_t *iter = *head;

        while (iter->next && (node->priority > iter->next->priority)) {
            iter = iter->next;
        }

        node->next = iter->next;
        iter->next = node;
    }
}

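/*
 * Add a thread to the scheduler: insert it into the thread list by priority,
 * mark it RUNNABLE, and initialize its architectural context so that it
 * starts executing fn(param) on the stack described by sp_limit/sp.
 */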
void thrd_start(struct thread_t *p_thrd,
                thrd_fn_t fn, void *param,
                uintptr_t sp_limit, uintptr_t sp)
{
    TFM_CORE_ASSERT(p_thrd != NULL);

    /* Insert the new thread into the list by priority. */
    insert_by_prior(&LIST_HEAD, p_thrd);

    /* Mark it as RUNNABLE after insertion. */
    thrd_set_state(p_thrd, THRD_STATE_RUNNABLE);

    tfm_arch_init_context(p_thrd->p_context_ctrl, (uintptr_t)fn, param,
                          (uintptr_t)fn & ~1UL, sp_limit, sp);
}

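/*
 * Update the state of a thread and refresh the cached runnable head
 * accordingly.
 */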
void thrd_set_state(struct thread_t *p_thrd, uint32_t new_state)
{
    TFM_CORE_ASSERT(p_thrd != NULL);

    p_thrd->state = new_state;

    /*
     * Cache the first runnable thread as the runnable head to reduce the
     * search depth in thrd_next(). If this thread does not become the new
     * head, fall back to scanning from the start of the whole list.
     */
    if ((p_thrd->state == THRD_STATE_RUNNABLE) &&
        ((RNBL_HEAD == NULL) || (p_thrd->priority < RNBL_HEAD->priority))) {
        RNBL_HEAD = p_thrd;
    } else {
        RNBL_HEAD = LIST_HEAD;
    }
}

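/*
 * Start scheduling: pick the first thread to run, trigger PendSV so the
 * scheduler takes over, optionally report the chosen thread through ppth,
 * and refresh the hardware context for that thread.
 */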
uint32_t thrd_start_scheduler(struct thread_t **ppth)
{
    struct thread_t *pth = thrd_next();

    tfm_arch_trigger_pendsv();

    if (ppth) {
        *ppth = pth;
    }

    return tfm_arch_refresh_hardware_context(pth->p_context_ctrl);
}

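/*
 * Block the given thread on a synchronization object: record it as the
 * object's owner, move it to the BLOCK state and trigger PendSV so another
 * thread can be scheduled.
 */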
void thrd_wait_on(struct sync_obj_t *p_sync_obj, struct thread_t *pth)
{
    TFM_CORE_ASSERT(p_sync_obj && p_sync_obj->magic == THRD_SYNC_MAGIC);

    p_sync_obj->owner = pth;
    thrd_set_state(pth, THRD_STATE_BLOCK);
    tfm_arch_trigger_pendsv();
}

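/*
 * Wake up the thread blocked on a synchronization object: mark it RUNNABLE,
 * store ret_val as the return code in its saved context, clear the owner
 * and trigger PendSV so the scheduler can run it.
 */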
void thrd_wake_up(struct sync_obj_t *p_sync_obj, uint32_t ret_val)
{
    TFM_CORE_ASSERT(p_sync_obj && p_sync_obj->magic == THRD_SYNC_MAGIC);

    if (p_sync_obj->owner && p_sync_obj->owner->state == THRD_STATE_BLOCK) {
        thrd_set_state(p_sync_obj->owner, THRD_STATE_RUNNABLE);
        tfm_arch_set_context_ret_code(p_sync_obj->owner->p_context_ctrl,
                                      ret_val);
        p_sync_obj->owner = NULL;
        tfm_arch_trigger_pendsv();
    }
}
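
/*
 * Illustrative usage sketch of the wait/wake pair above. The sync_obj_t
 * initializer and the return value passed to thrd_wake_up() are assumptions,
 * not taken from this file:
 *
 *     struct sync_obj_t obj = { .magic = THRD_SYNC_MAGIC };
 *
 *     // In the waiting context: block the current thread on the object.
 *     thrd_wait_on(&obj, p_curr_thrd);
 *
 *     // Later, from another context: unblock it and hand back a value.
 *     thrd_wake_up(&obj, 0);
 */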