blob: eaf64309306c7acaaba0d9e7ba389989bcad063c [file] [log] [blame]
/*
 * Copyright (c) 2018-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */
7
8#include <stdint.h>
9#include "thread.h"
10#include "tfm_arch.h"
11#include "utilities.h"
12
/* Pointer to the thread currently holding the CPU. */
struct thread_t *p_curr_thrd;

/* Force ZERO in case ZI(bss) clear is missing. */
static struct thread_t *p_thrd_head = NULL; /* Point to the first thread. */
static struct thread_t *p_rnbl_head = NULL; /* Point to the first runnable. */

/*
 * Define Macro to fetch global to support future expansion (PERCPU e.g.).
 * All list manipulation below goes through these macros rather than the
 * raw globals.
 */
#define LIST_HEAD p_thrd_head
#define RNBL_HEAD p_rnbl_head
23
24struct thread_t *thrd_next(void)
25{
26 struct thread_t *p_thrd = RNBL_HEAD;
27 /*
28 * First runnable thread has highest priority since threads are
29 * sorted by priority.
30 */
31 while (p_thrd && p_thrd->state != THRD_STATE_RUNNABLE) {
32 p_thrd = p_thrd->next;
33 }
34
35 return p_thrd;
36}
37
38static void insert_by_prior(struct thread_t **head, struct thread_t *node)
39{
40 if (*head == NULL || (node->priority <= (*head)->priority)) {
41 node->next = *head;
42 *head = node;
43 } else {
44 struct thread_t *iter = *head;
45
46 while (iter->next && (node->priority > iter->next->priority)) {
47 iter = iter->next;
48 }
49
50 node->next = iter->next;
51 iter->next = node;
52 }
53}
54
/*
 * Register a new thread and make it runnable.
 *
 * p_thrd  - thread object to start; must be non-NULL and must have its
 *           p_context_ctrl and priority fields set up by the caller.
 * fn      - thread entry function.
 * exit_fn - function invoked when the thread entry returns.
 *
 * The thread is linked into the priority-sorted list, its initial
 * hardware context is prepared, and it is then marked RUNNABLE.
 */
void thrd_start(struct thread_t *p_thrd, thrd_fn_t fn, thrd_fn_t exit_fn)
{
    TFM_CORE_ASSERT(p_thrd != NULL);

    /* Insert a new thread with priority */
    insert_by_prior(&LIST_HEAD, p_thrd);

    /* NOTE(review): third argument is the entry parameter — here always NULL. */
    tfm_arch_init_context(p_thrd->p_context_ctrl, (uintptr_t)fn, NULL,
                          (uintptr_t)exit_fn);

    /* Mark it as RUNNABLE after insertion */
    thrd_set_state(p_thrd, THRD_STATE_RUNNABLE);
}
68
69void thrd_set_state(struct thread_t *p_thrd, uint32_t new_state)
70{
71 TFM_CORE_ASSERT(p_thrd != NULL);
72
73 p_thrd->state = new_state;
74
75 /*
76 * Set first runnable thread as head to reduce enumerate
77 * depth while searching for a first runnable thread.
78 */
79 if ((p_thrd->state == THRD_STATE_RUNNABLE) &&
80 ((RNBL_HEAD == NULL) || (p_thrd->priority < RNBL_HEAD->priority))) {
81 RNBL_HEAD = p_thrd;
82 } else {
83 RNBL_HEAD = LIST_HEAD;
84 }
85}
86
87uint32_t thrd_start_scheduler(struct thread_t **ppth)
88{
89 struct thread_t *pth = thrd_next();
90
91 tfm_arch_trigger_pendsv();
92
93 if (ppth) {
94 *ppth = pth;
95 }
96
97 return tfm_arch_refresh_hardware_context(pth->p_context_ctrl);
98}
99
100void thrd_wait_on(struct sync_obj_t *p_sync_obj, struct thread_t *pth)
101{
102 TFM_CORE_ASSERT(p_sync_obj && p_sync_obj->magic == THRD_SYNC_MAGIC);
103
104 p_sync_obj->owner = pth;
105 thrd_set_state(pth, THRD_STATE_BLOCK);
Ken Liu5d73c872021-08-19 19:23:17 +0800106}
107
108void thrd_wake_up(struct sync_obj_t *p_sync_obj, uint32_t ret_val)
109{
110 TFM_CORE_ASSERT(p_sync_obj && p_sync_obj->magic == THRD_SYNC_MAGIC);
111
112 if (p_sync_obj->owner && p_sync_obj->owner->state == THRD_STATE_BLOCK) {
113 thrd_set_state(p_sync_obj->owner, THRD_STATE_RUNNABLE);
114 tfm_arch_set_context_ret_code(p_sync_obj->owner->p_context_ctrl,
115 ret_val);
116 p_sync_obj->owner = NULL;
Ken Liu5d73c872021-08-19 19:23:17 +0800117 }
118}