/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "bitops.h"
#include "critical_section.h"
#include "psa/lifecycle.h"
#include "psa/service.h"
#include "interrupt.h"
#include "spm_ipc.h"
#include "tfm_arch.h"
#include "tfm_core_utils.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/interrupt_defs.h"
#include "ffm/psa_api.h"
#include "utilities.h"
#include "ffm/backend.h"
#include "ffm/spm_error_base.h"
#include "tfm_rpc.h"
#include "tfm_spm_hal.h"
#include "tfm_hal_interrupt.h"
#include "tfm_hal_platform.h"
#include "tfm_psa_call_pack.h"

#define GET_STATELESS_SERVICE(index)    (stateless_services_ref_tbl[index])
extern struct service_t *stateless_services_ref_tbl[];

#if PSA_FRAMEWORK_HAS_MM_IOVEC

/*
 * The MM-IOVEC status
 * The maximum total number of invecs and outvecs is 8.
 * Each invec/outvec takes 4 bits, 32 bits in total.
 *
 * The encoding format of the MM-IOVEC status:
 *--------------------------------------------------------------
 *| Bit    |  31 - 28  |  27 - 24  | ... |   7 - 4  |  3 - 0   |
 *--------------------------------------------------------------
 *| Vector | outvec[3] | outvec[2] | ... | invec[1] | invec[0] |
 *--------------------------------------------------------------
 *
 * Take invec[0] as an example:
 *
 * bit 0: whether invec[0] has been mapped.
 * bit 1: whether invec[0] has been unmapped.
 * bit 2: whether invec[0] has been accessed using psa_read(), psa_skip() or
 *        psa_write().
 * bit 3: reserved for invec[0].
 */

#define IOVEC_STATUS_BITS     4    /* Each vector occupies 4 bits. */
#define OUTVEC_IDX_BASE       4    /*
                                    * Base index of outvec.
                                    * There are four invecs in front of
                                    * outvec.
                                    */
#define INVEC_IDX_BASE        0    /* Base index of invec. */

#define IOVEC_MAPPED_BIT      (1U << 0)
#define IOVEC_UNMAPPED_BIT    (1U << 1)
#define IOVEC_ACCESSED_BIT    (1U << 2)

#define IOVEC_IS_MAPPED(msg, iovec_idx)                             \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_MAPPED_BIT)
#define IOVEC_IS_UNMAPPED(msg, iovec_idx)                           \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_UNMAPPED_BIT)
#define IOVEC_IS_ACCESSED(msg, iovec_idx)                           \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_ACCESSED_BIT)
#define SET_IOVEC_MAPPED(msg, iovec_idx)                            \
    (((msg)->iovec_status) |= (IOVEC_MAPPED_BIT <<                  \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_UNMAPPED(msg, iovec_idx)                          \
    (((msg)->iovec_status) |= (IOVEC_UNMAPPED_BIT <<                \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_ACCESSED(msg, iovec_idx)                          \
    (((msg)->iovec_status) |= (IOVEC_ACCESSED_BIT <<                \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
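
/*
 * Worked example of the encoding above (illustrative only, not used by the
 * code): outvec[0] has iovec index OUTVEC_IDX_BASE + 0 = 4, so its status
 * nibble occupies bits [19:16] of iovec_status:
 *
 *   SET_IOVEC_MAPPED(msg, 4);      - iovec_status |= 0x1 << 16
 *   SET_IOVEC_ACCESSED(msg, 4);    - iovec_status |= 0x4 << 16
 *   IOVEC_IS_MAPPED(msg, 4);       - non-zero, because bit 16 is set
 */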

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */

void spm_handle_programmer_errors(psa_status_t status)
{
    if (status == PSA_ERROR_PROGRAMMER_ERROR ||
        status == PSA_ERROR_CONNECTION_REFUSED ||
        status == PSA_ERROR_CONNECTION_BUSY) {
        if (!tfm_spm_is_ns_caller()) {
            tfm_core_panic();
        }
    }
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: Return PSA_LIFECYCLE_UNKNOWN to the caller directly for now.
     * Reporting the real lifecycle state will be implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/* PSA Client API function body */

uint32_t tfm_spm_client_psa_framework_version(void)
{
    return PSA_FRAMEWORK_VERSION;
}

uint32_t tfm_spm_client_psa_version(uint32_t sid)
{
    struct service_t *service;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It should return PSA_VERSION_NONE if the RoT Service is not
     * implemented.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_VERSION_NONE;
    }

    /*
     * It should return PSA_VERSION_NONE if the caller is not authorized
     * to access the RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_VERSION_NONE;
    }

    return service->p_ldinf->version;
}

psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *connect_handle;
    int32_t client_id;
    psa_handle_t handle;
    bool ns_caller = tfm_spm_is_ns_caller();
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /*
     * It is a PROGRAMMER ERROR if the RoT Service does not exist on the
     * platform.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /* It is a PROGRAMMER ERROR if connecting to a stateless service. */
    if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * It is a PROGRAMMER ERROR if the caller is not authorized to access the
     * RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /*
     * It is a PROGRAMMER ERROR if the version of the RoT Service requested is
     * not supported on the platform.
     */
    if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /*
     * Create the connection handle here because it is still possible to
     * return an error code to the client if creation fails.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    connect_handle = tfm_spm_create_conn_handle(service, client_id);
    CRITICAL_SECTION_LEAVE(cs_assert);
    if (!connect_handle) {
        return PSA_ERROR_CONNECTION_BUSY;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);
    if (!msg) {
        /* Not enough resources to create the message */
        return PSA_ERROR_CONNECTION_BUSY;
    }

    handle = tfm_spm_to_user_handle(connect_handle);
    /* No input or output needed for connect message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_CONNECT,
                     client_id, NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_call(psa_handle_t handle,
                                     uint32_t ctrl_param,
                                     const psa_invec *inptr,
                                     psa_outvec *outptr)
{
    psa_invec invecs[PSA_MAX_IOVEC];
    psa_outvec outvecs[PSA_MAX_IOVEC];
    struct tfm_conn_handle_t *conn_handle;
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    int i, j;
    int32_t client_id;
    uint32_t sid, version, index;
    uint32_t privileged;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    bool ns_caller = tfm_spm_is_ns_caller();
    int32_t type = (int32_t)(int16_t)((ctrl_param & TYPE_MASK) >> TYPE_OFFSET);
    size_t in_num = (size_t)((ctrl_param & IN_LEN_MASK) >> IN_LEN_OFFSET);
    size_t out_num = (size_t)((ctrl_param & OUT_LEN_MASK) >> OUT_LEN_OFFSET);

    /* The request type must be zero or positive. */
    if (type < 0) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* It is a PROGRAMMER ERROR if in_len + out_len > PSA_MAX_IOVEC. */
    if ((in_num > PSA_MAX_IOVEC) ||
        (out_num > PSA_MAX_IOVEC) ||
        (in_num + out_num > PSA_MAX_IOVEC)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /* Allocate space from the handle pool for a static handle. */
    if (IS_STATIC_HANDLE(handle)) {
        index = GET_INDEX_FROM_STATIC_HANDLE(handle);

        if (!IS_VALID_STATIC_HANDLE_IDX(index)) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = GET_STATELESS_SERVICE(index);
        if (!service) {
            tfm_core_panic();
        }

        sid = service->p_ldinf->sid;

        /*
         * It is a PROGRAMMER ERROR if the caller is not authorized to access
         * the RoT Service.
         */
        if (tfm_spm_check_authorization(sid, service, ns_caller)
            != SPM_SUCCESS) {
            return PSA_ERROR_CONNECTION_REFUSED;
        }

        version = GET_VERSION_FROM_STATIC_HANDLE(handle);

        if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        CRITICAL_SECTION_ENTER(cs_assert);
        conn_handle = tfm_spm_create_conn_handle(service, client_id);
        CRITICAL_SECTION_LEAVE(cs_assert);

        if (!conn_handle) {
            return PSA_ERROR_CONNECTION_BUSY;
        }

        conn_handle->rhandle = NULL;
        handle = tfm_spm_to_user_handle(conn_handle);
    } else {
        conn_handle = tfm_spm_to_handle_instance(handle);

        /* It is a PROGRAMMER ERROR if an invalid handle was passed. */
        if (tfm_spm_validate_conn_handle(conn_handle, client_id)
            != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * It is a PROGRAMMER ERROR if the connection is currently
         * handling a request.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * Return PSA_ERROR_PROGRAMMER_ERROR immediately if the connection
         * has been terminated by the RoT Service.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = conn_handle->internal_msg.service;
    }

    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    privileged = tfm_spm_get_caller_privilege_mode();

    /*
     * Read client invecs from the wrap input vector. It is a PROGRAMMER ERROR
     * if the memory reference for the wrap input vector is invalid or not
     * readable.
     */
    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * Read client outvecs from the wrap output vector; the actual lengths are
     * updated later. It is a PROGRAMMER ERROR if the memory reference for the
     * wrap output vector is invalid or not read-write.
     */
    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    spm_memset(invecs, 0, sizeof(invecs));
    spm_memset(outvecs, 0, sizeof(outvecs));

    /* Copy the addresses out to avoid TOCTOU attacks. */
    spm_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
    spm_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));

    /*
     * For each client input vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not readable.
     */
    for (i = 0; i < in_num; i++) {
        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * Clients must never overlap input parameters because of the risk of a
     * double-fetch inconsistency.
     * Overflow is checked in the tfm_memory_check functions.
     */
    for (i = 0; i + 1 < in_num; i++) {
        for (j = i + 1; j < in_num; j++) {
            if (!((char *) invecs[j].base + invecs[j].len <=
                  (char *) invecs[i].base ||
                  (char *) invecs[j].base >=
                  (char *) invecs[i].base + invecs[i].len)) {
                return PSA_ERROR_PROGRAMMER_ERROR;
            }
        }
    }
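
    /*
     * For example (illustrative values only): invec[0] = {0x1000, 0x100} and
     * invec[1] = {0x1080, 0x20} overlap in [0x1080, 0x10A0) and are rejected
     * by the check above with PSA_ERROR_PROGRAMMER_ERROR.
     */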

    /*
     * For each client output vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not read-write.
     */
    for (i = 0; i < out_num; i++) {
        if (tfm_memory_check(outvecs[i].base, outvecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * FixMe: Need to check whether the message is unrecognized by the RoT
     * Service or incorrectly formatted.
     */
    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    tfm_spm_fill_msg(msg, service, handle, type, client_id,
                     invecs, in_num, outvecs, out_num, outptr);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_close(psa_handle_t handle)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *conn_handle;
    int32_t client_id;
    bool ns_caller = tfm_spm_is_ns_caller();

    /* It has no effect if called with the NULL handle */
    if (handle == PSA_NULL_HANDLE) {
        return PSA_SUCCESS;
    }

    /* It is a PROGRAMMER ERROR if called with a stateless handle. */
    if (IS_STATIC_HANDLE(handle)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    conn_handle = tfm_spm_to_handle_instance(handle);
    /*
     * It is a PROGRAMMER ERROR if an invalid handle was provided that is not
     * the null handle.
     */
    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    service = conn_handle->internal_msg.service;
    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the connection is currently handling a
     * request.
     */
    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* No input or output needed for close message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_DISCONNECT, client_id,
                     NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

/* PSA Partition API function body */

psa_signal_t tfm_spm_partition_psa_wait(psa_signal_t signal_mask,
                                        uint32_t timeout)
{
    struct partition_t *partition = NULL;

    /*
     * Timeout[30:0] is reserved for future use.
     * The SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->signals_allowed & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * thrd_wait_on() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily stored
     * in the runtime context. Once new signals become available, the return
     * value is updated with the available signals and the blocked thread gets
     * to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->signals_asserted & signal_mask) == 0) {
        partition->signals_waiting = signal_mask;
        thrd_wait_on(&partition->waitobj, CURRENT_THREAD);
    }

    return partition->signals_asserted & signal_mask;
}
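
/*
 * Partition-side view (illustrative sketch, not part of the SPM): a Secure
 * Partition typically drives the APIs below from a loop such as
 *
 *   psa_signal_t signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
 *   if (signals & MY_SERVICE_SIGNAL) {    - MY_SERVICE_SIGNAL is a
 *       psa_msg_t msg;                      hypothetical manifest signal
 *       psa_get(MY_SERVICE_SIGNAL, &msg);
 *       ...handle msg.type...
 *       psa_reply(msg.handle, PSA_SUCCESS);
 *   }
 *
 * Each of those partition-side calls is routed to the corresponding
 * tfm_spm_partition_psa_* function in this file.
 */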

psa_status_t tfm_spm_partition_psa_get(psa_signal_t signal, psa_msg_t *msg)
{
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct partition_t *partition = NULL;
    uint32_t privileged;

    /*
     * Only one message can be retrieved each time psa_get() is called. It is
     * a fatal error if the input signal has more than one signal bit set.
     */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service
     * signal is returned by psa_wait().
     */
    if (partition->signals_asserted == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->signals_asserted & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the message by signal from the partition. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * Service.
     */
    tmp_msg = tfm_spm_get_msg_by_signal(partition, signal);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TO_CONTAINER(tmp_msg,
                  struct tfm_conn_handle_t,
                  internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    spm_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    return PSA_SUCCESS;
}

void tfm_spm_partition_psa_set_rhandle(psa_handle_t msg_handle, void *rhandle)
{
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /* It is a PROGRAMMER ERROR if a stateless service sets rhandle. */
    if (SERVICE_IS_STATELESS(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store the reverse handle for subsequent client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

size_t tfm_spm_partition_psa_read(psa_handle_t msg_handle, uint32_t invec_idx,
                                  void *buffer, size_t num_bytes)
{
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
                        msg->msg.in_size[invec_idx] : num_bytes;

    spm_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}
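
/*
 * For example (illustrative values only): if msg.in_size[0] is 100 and the
 * service calls psa_read(msg.handle, 0, buf, 150), only 100 bytes are copied
 * into buf, the function returns 100, and the remaining size of invec[0]
 * drops to 0. Here buf is a hypothetical service-owned buffer.
 */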

size_t tfm_spm_partition_psa_skip(psa_handle_t msg_handle, uint32_t invec_idx,
                                  size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * If num_bytes is greater than the remaining size of the input vector then
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_partition_psa_write(psa_handle_t msg_handle, uint32_t outvec_idx,
                                 const void *buffer, size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
                    msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE));
#endif

    /*
     * Copy the service buffer to the client outvec. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    spm_memcpy((char *)msg->outvec[outvec_idx].base +
               msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}

int32_t tfm_spm_partition_psa_reply(psa_handle_t msg_handle,
                                    psa_status_t status)
{
    struct service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * The RoT Service information needed here is stored in the message body
     * structure. Only two parameters are passed to this function (handle and
     * status), so this is the simplest way to retrieve it.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are passed to this function: CONNECTION, REQUEST
     * and DISCONNECTION. Each type needs to be processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to a PSA_IPC_CONNECT message. The connect handle is returned
         * if the input status is PSA_SUCCESS. Other return values are based
         * on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* Service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {

#if PSA_FRAMEWORK_HAS_MM_IOVEC

            /*
             * If the unmap function is not called for an input/output vector
             * that has been mapped, the framework removes the mapping.
             */
            int i;

            for (i = 0; i < PSA_MAX_IOVEC * 2; i++) {
                if (IOVEC_IS_MAPPED(msg, i) && (!IOVEC_IS_UNMAPPED(msg, i))) {
                    SET_IOVEC_UNMAPPED(msg, i);
                    /*
                     * Any output vectors that are still mapped will report
                     * that zero bytes have been written.
                     */
                    if (i >= OUTVEC_IDX_BASE) {
                        msg->outvec[i - OUTVEC_IDX_BASE].len = 0;
                    }
                }
            }

#endif
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
            if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
                tfm_spm_free_conn_handle(service, conn_handle);
            }
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the
         * SPM must panic the Secure Partition in response to a PROGRAMMER
         * ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    /*
     * TODO: This can be optimized further by moving the critical section
     * protection into the mailbox. The implementation also needs to be
     * checked when a secure context is involved.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    ret = backend_instance.replying(msg, ret);
    CRITICAL_SECTION_LEAVE(cs_assert);

    return ret;
}

void tfm_spm_partition_psa_notify(int32_t partition_id)
{
    struct partition_t *p_pt = tfm_spm_get_partition_by_id(partition_id);

    spm_assert_signal(p_pt, PSA_DOORBELL);
}

void tfm_spm_partition_psa_clear(void)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->signals_asserted & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~PSA_DOORBELL;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

void tfm_spm_partition_psa_eoi(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info = NULL;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (!irq_info) {
        tfm_core_panic();
    }

    if (irq_info->flih_func) {
        /* This API is for SLIH IRQs only */
        tfm_core_panic();
    }

    /* It is a fatal error if the passed signal is not currently asserted */
    if ((partition->signals_asserted & irq_signal) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);

    tfm_hal_irq_clear_pending(irq_info->source);
    tfm_hal_irq_enable(irq_info->source);
}

void tfm_spm_partition_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * secure partition panics.
     */
    tfm_hal_system_reset();
}

void tfm_spm_partition_psa_irq_enable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_enable(irq_info->source);
}

psa_irq_status_t tfm_spm_partition_psa_irq_disable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_disable(irq_info->source);

    return 1;
}

void tfm_spm_partition_psa_reset_signal(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info;
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    if (!irq_info->flih_func) {
        /* This API is for FLIH IRQs only */
        tfm_core_panic();
    }

    if ((partition->signals_asserted & irq_signal) == 0) {
        /* The signal is not asserted */
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

#if PSA_FRAMEWORK_HAS_MM_IOVEC

const void *tfm_spm_partition_psa_map_invec(psa_handle_t msg_handle,
                                            uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the input vector has length zero. */
    if (msg->msg.in_size[invec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been accessed
     * using psa_read() or psa_skip().
     */
    if (IOVEC_IS_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the memory reference for the input vector is
     * invalid or not readable.
     */
    if (tfm_memory_check(msg->invec[invec_idx].base, msg->invec[invec_idx].len,
                         false, TFM_MEMORY_ACCESS_RO,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    SET_IOVEC_MAPPED(msg, (invec_idx + INVEC_IDX_BASE));

    return msg->invec[invec_idx].base;
}

void tfm_spm_partition_psa_unmap_invec(psa_handle_t msg_handle,
                                       uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has not been mapped by a call
     * to psa_map_invec().
     */
    if (!IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been unmapped by a
     * call to psa_unmap_invec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE));
}
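
/*
 * Partition-side usage sketch (illustrative only, requires MM-IOVEC to be
 * enabled for the service): instead of psa_read(), a service may access the
 * client buffer in place:
 *
 *   const uint8_t *in = psa_map_invec(msg.handle, 0);
 *   ...process msg.in_size[0] bytes at in...
 *   psa_unmap_invec(msg.handle, 0);
 *
 * Mixing the two models on the same vector is a fatal error, as checked in
 * tfm_spm_partition_psa_read() and tfm_spm_partition_psa_map_invec() above.
 */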

void *tfm_spm_partition_psa_map_outvec(psa_handle_t msg_handle,
                                       uint32_t outvec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the output vector has length zero. */
    if (msg->msg.out_size[outvec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been accessed
     * using psa_write().
     */
    if (IOVEC_IS_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector is invalid or not read-write.
     */
    if (tfm_memory_check(msg->outvec[outvec_idx].base,
                         msg->outvec[outvec_idx].len, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }
    SET_IOVEC_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    return msg->outvec[outvec_idx].base;
}

void tfm_spm_partition_psa_unmap_outvec(psa_handle_t msg_handle,
                                        uint32_t outvec_idx, size_t len)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if len is greater than the output vector size.
     */
    if (len > msg->msg.out_size[outvec_idx]) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has not been mapped by a call
     * to psa_map_outvec().
     */
    if (!IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been unmapped by a
     * call to psa_unmap_outvec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len = len;
}

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */