/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "bitops.h"
#include "critical_section.h"
#include "psa/lifecycle.h"
#include "psa/service.h"
#include "spm_ipc.h"
#include "tfm_arch.h"
#include "tfm_core_utils.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/interrupt_defs.h"
#include "ffm/psa_api.h"
#include "utilities.h"
#include "ffm/backend.h"
#include "ffm/spm_error_base.h"
#include "tfm_rpc.h"
#include "tfm_spm_hal.h"
#include "tfm_hal_interrupt.h"
#include "tfm_hal_platform.h"
#include "tfm_psa_call_pack.h"

#define GET_STATELESS_SERVICE(index) (stateless_services_ref_tbl[index])
extern struct service_t *stateless_services_ref_tbl[];

#if PSA_FRAMEWORK_HAS_MM_IOVEC

/*
 * The MM-IOVEC status
 * The maximum total number of invecs and outvecs is 8.
 * Each invec/outvec takes 4 bits, 32 bits in total.
 *
 * The encoding format of the MM-IOVEC status:
 *--------------------------------------------------------------
 *|  Bit   |  31 - 28  |  27 - 24  | ... |  7 - 4   |  3 - 0   |
 *--------------------------------------------------------------
 *| Vector | outvec[3] | outvec[2] | ... | invec[1] | invec[0] |
 *--------------------------------------------------------------
 *
 * Take invec[0] as an example:
 *
 * bit 0: whether invec[0] has been mapped.
 * bit 1: whether invec[0] has been unmapped.
 * bit 2: whether invec[0] has been accessed, using psa_read(), psa_skip() or
 *        psa_write().
 * bit 3: reserved for invec[0].
 */
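/*
 * Example (illustrative): after invec[0] has been mapped with psa_map_invec()
 * and invec[1] has been accessed with psa_read(), iovec_status reads
 * 0x00000041:
 *   bit 0 (invec[0] mapped)   -> 0x00000001
 *   bit 6 (invec[1] accessed) -> 0x00000040
 */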

#define IOVEC_STATUS_BITS       4    /* Each vector occupies 4 bits. */
#define OUTVEC_IDX_BASE         4    /*
                                      * Base index of outvec.
                                      * There are four invecs in front of
                                      * outvec.
                                      */
#define INVEC_IDX_BASE          0    /* Base index of invec. */

#define IOVEC_MAPPED_BIT        (1U << 0)
#define IOVEC_UNMAPPED_BIT      (1U << 1)
#define IOVEC_ACCESSED_BIT      (1U << 2)

#define IOVEC_IS_MAPPED(msg, iovec_idx)                                     \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &         \
     IOVEC_MAPPED_BIT)
#define IOVEC_IS_UNMAPPED(msg, iovec_idx)                                   \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &         \
     IOVEC_UNMAPPED_BIT)
#define IOVEC_IS_ACCESSED(msg, iovec_idx)                                   \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &         \
     IOVEC_ACCESSED_BIT)
#define SET_IOVEC_MAPPED(msg, iovec_idx)                                    \
    (((msg)->iovec_status) |= (IOVEC_MAPPED_BIT <<                          \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_UNMAPPED(msg, iovec_idx)                                  \
    (((msg)->iovec_status) |= (IOVEC_UNMAPPED_BIT <<                        \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_ACCESSED(msg, iovec_idx)                                  \
    (((msg)->iovec_status) |= (IOVEC_ACCESSED_BIT <<                        \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */

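/*
 * PSA FF-M requires that a PROGRAMMER ERROR raised by a Secure Partition
 * caller panics that caller, while a non-secure caller can simply receive
 * the error status. This helper enforces that policy for a status value
 * returned by one of the PSA client APIs.
 */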
void spm_handle_programmer_errors(psa_status_t status)
{
    if (status == PSA_ERROR_PROGRAMMER_ERROR ||
        status == PSA_ERROR_CONNECTION_REFUSED ||
        status == PSA_ERROR_CONNECTION_BUSY) {
        if (!tfm_spm_is_ns_caller()) {
            tfm_core_panic();
        }
    }
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: PSA_LIFECYCLE_UNKNOWN is returned to the caller directly for
     * now. A real implementation will be added in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/* PSA Client API function body */

uint32_t tfm_spm_client_psa_framework_version(void)
{
    return PSA_FRAMEWORK_VERSION;
}

uint32_t tfm_spm_client_psa_version(uint32_t sid)
{
    struct service_t *service;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It should return PSA_VERSION_NONE if the RoT Service is not
     * implemented.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_VERSION_NONE;
    }

    /*
     * It should return PSA_VERSION_NONE if the caller is not authorized
     * to access the RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_VERSION_NONE;
    }

    return service->p_ldinf->version;
}

psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *connect_handle;
    int32_t client_id;
    psa_handle_t handle;
    bool ns_caller = tfm_spm_is_ns_caller();
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /*
     * It is a PROGRAMMER ERROR if the RoT Service does not exist on the
     * platform.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /* It is a PROGRAMMER ERROR if connecting to a stateless service. */
    if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * It is a PROGRAMMER ERROR if the caller is not authorized to access the
     * RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /*
     * It is a PROGRAMMER ERROR if the version of the RoT Service requested is
     * not supported on the platform.
     */
    if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /*
     * Create the connection handle here so that an error code can be returned
     * to the client if creation fails.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    connect_handle = tfm_spm_create_conn_handle(service, client_id);
    CRITICAL_SECTION_LEAVE(cs_assert);
    if (!connect_handle) {
        return PSA_ERROR_CONNECTION_BUSY;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);
    if (!msg) {
        /* Not enough resources to create the message */
        return PSA_ERROR_CONNECTION_BUSY;
    }

    handle = tfm_spm_to_user_handle(connect_handle);
    /* No input or output needed for a connect message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_CONNECT,
                     client_id, NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_call(psa_handle_t handle,
                                     uint32_t ctrl_param,
                                     const psa_invec *inptr,
                                     psa_outvec *outptr)
{
    psa_invec invecs[PSA_MAX_IOVEC];
    psa_outvec outvecs[PSA_MAX_IOVEC];
    struct tfm_conn_handle_t *conn_handle;
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    int i, j;
    int32_t client_id;
    uint32_t sid, version, index;
    uint32_t privileged;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    bool ns_caller = tfm_spm_is_ns_caller();
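    /*
     * ctrl_param packs the request type and the numbers of input and output
     * vectors on the client side (see tfm_psa_call_pack.h); unpack them here
     * using the corresponding masks and offsets.
     */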
    int32_t type = (int32_t)(int16_t)((ctrl_param & TYPE_MASK) >> TYPE_OFFSET);
    size_t in_num = (size_t)((ctrl_param & IN_LEN_MASK) >> IN_LEN_OFFSET);
    size_t out_num = (size_t)((ctrl_param & OUT_LEN_MASK) >> OUT_LEN_OFFSET);

    /* The request type must be zero or positive. */
    if (type < 0) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* It is a PROGRAMMER ERROR if in_len + out_len > PSA_MAX_IOVEC. */
    if ((in_num > PSA_MAX_IOVEC) ||
        (out_num > PSA_MAX_IOVEC) ||
        (in_num + out_num > PSA_MAX_IOVEC)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /* Allocate space from the handle pool for a static handle. */
    if (IS_STATIC_HANDLE(handle)) {
        index = GET_INDEX_FROM_STATIC_HANDLE(handle);

        if (!IS_VALID_STATIC_HANDLE_IDX(index)) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = GET_STATELESS_SERVICE(index);
        if (!service) {
            tfm_core_panic();
        }

        sid = service->p_ldinf->sid;

        /*
         * It is a PROGRAMMER ERROR if the caller is not authorized to access
         * the RoT Service.
         */
        if (tfm_spm_check_authorization(sid, service, ns_caller)
            != SPM_SUCCESS) {
            return PSA_ERROR_CONNECTION_REFUSED;
        }

        version = GET_VERSION_FROM_STATIC_HANDLE(handle);

        if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        CRITICAL_SECTION_ENTER(cs_assert);
        conn_handle = tfm_spm_create_conn_handle(service, client_id);
        CRITICAL_SECTION_LEAVE(cs_assert);

        if (!conn_handle) {
            return PSA_ERROR_CONNECTION_BUSY;
        }

        conn_handle->rhandle = NULL;
        handle = tfm_spm_to_user_handle(conn_handle);
    } else {
        conn_handle = tfm_spm_to_handle_instance(handle);

        /* It is a PROGRAMMER ERROR if an invalid handle was passed. */
        if (tfm_spm_validate_conn_handle(conn_handle, client_id)
            != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * It is a PROGRAMMER ERROR if the connection is currently
         * handling a request.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * Return PSA_ERROR_PROGRAMMER_ERROR immediately if the connection
         * has been terminated by the RoT Service.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = conn_handle->internal_msg.service;
    }

    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    privileged = tfm_spm_get_caller_privilege_mode();

    /*
     * Read client invecs from the wrap input vector. It is a PROGRAMMER ERROR
     * if the memory reference for the wrap input vector is invalid or not
     * readable.
     */
    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
        TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * Read client outvecs from the wrap output vector; the actual lengths are
     * updated later. It is a PROGRAMMER ERROR if the memory reference for the
     * wrap output vector is invalid or not read-write.
     */
    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
        TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    spm_memset(invecs, 0, sizeof(invecs));
    spm_memset(outvecs, 0, sizeof(outvecs));

    /* Copy the addresses out to avoid TOCTOU attacks. */
    spm_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
    spm_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));

    /*
     * For client input vectors, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not readable.
     */
    for (i = 0; i < in_num; i++) {
        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
            TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * Clients must never overlap input parameters because of the risk of a
     * double-fetch inconsistency.
     * Overflow is checked in the tfm_memory_check functions.
     */
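    /*
     * For example (illustrative): invec[0] = { 0x1000, 16 } and
     * invec[1] = { 0x1008, 16 } overlap in [0x1008, 0x1010) and are
     * therefore rejected.
     */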
    for (i = 0; i + 1 < in_num; i++) {
        for (j = i + 1; j < in_num; j++) {
            if (!((char *) invecs[j].base + invecs[j].len <=
                  (char *) invecs[i].base ||
                  (char *) invecs[j].base >=
                  (char *) invecs[i].base + invecs[i].len)) {
                return PSA_ERROR_PROGRAMMER_ERROR;
            }
        }
    }

    /*
     * For client output vectors, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not read-write.
     */
    for (i = 0; i < out_num; i++) {
        if (tfm_memory_check(outvecs[i].base, outvecs[i].len,
            ns_caller, TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * FixMe: Need to check if the message is unrecognized by the RoT
     * Service or incorrectly formatted.
     */
    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    tfm_spm_fill_msg(msg, service, handle, type, client_id,
                     invecs, in_num, outvecs, out_num, outptr);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_close(psa_handle_t handle)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *conn_handle;
    int32_t client_id;
    bool ns_caller = tfm_spm_is_ns_caller();

    /* It will have no effect if called with the NULL handle */
    if (handle == PSA_NULL_HANDLE) {
        return PSA_SUCCESS;
    }

    /* It is a PROGRAMMER ERROR if called with a stateless handle. */
    if (IS_STATIC_HANDLE(handle)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    conn_handle = tfm_spm_to_handle_instance(handle);
    /*
     * It is a PROGRAMMER ERROR if an invalid handle was provided that is not
     * the null handle.
     */
    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    service = conn_handle->internal_msg.service;
    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the connection is currently handling a
     * request.
     */
    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* No input or output needed for a close message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_DISCONNECT, client_id,
                     NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

/* PSA Partition API function body */

psa_signal_t tfm_spm_partition_psa_wait(psa_signal_t signal_mask,
                                        uint32_t timeout)
{
    struct partition_t *partition = NULL;

    /*
     * Timeout[30:0] is reserved for future use.
     * The SPM must ignore the value of these RES bits.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->signals_allowed & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * thrd_wait_on() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily set into
     * the runtime context. After new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->signals_asserted & signal_mask) == 0) {
        partition->signals_waiting = signal_mask;
        thrd_wait_on(&partition->waitobj, CURRENT_THREAD);
    }

    return partition->signals_asserted & signal_mask;
}

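/*
 * Illustrative partition-side usage of psa_wait() and psa_get() (the signal
 * name is an example only):
 *
 *     psa_signal_t signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
 *     if (signals & EXAMPLE_SERVICE_SIGNAL) {
 *         psa_msg_t msg;
 *         if (psa_get(EXAMPLE_SERVICE_SIGNAL, &msg) == PSA_SUCCESS) {
 *             ...handle PSA_IPC_CONNECT, request or PSA_IPC_DISCONNECT...
 *         }
 *     }
 */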
psa_status_t tfm_spm_partition_psa_get(psa_signal_t signal, psa_msg_t *msg)
{
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct partition_t *partition = NULL;
    uint32_t privileged;

    /*
     * Only one message can be retrieved at a time by psa_get(). It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                            partition->p_ldinf->flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
        privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service signal
     * is returned by psa_wait().
     */
    if (partition->signals_asserted == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->signals_asserted & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the message by signal from the partition. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * Service.
     */
    tmp_msg = tfm_spm_get_msg_by_signal(partition, signal);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TO_CONTAINER(tmp_msg,
                  struct tfm_conn_handle_t,
                  internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    spm_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    return PSA_SUCCESS;
}

void tfm_spm_partition_psa_set_rhandle(psa_handle_t msg_handle, void *rhandle)
{
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /* It is a PROGRAMMER ERROR if a stateless service sets rhandle. */
    if (SERVICE_IS_STATELESS(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store the reverse handle for subsequent client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

size_t tfm_spm_partition_psa_read(psa_handle_t msg_handle, uint32_t invec_idx,
                                  void *buffer, size_t num_bytes)
{
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                            partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
            msg->msg.in_size[invec_idx] : num_bytes;

    spm_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}

size_t tfm_spm_partition_psa_skip(psa_handle_t msg_handle, uint32_t invec_idx,
                                  size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * If num_bytes is greater than the remaining size of the input vector then
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_partition_psa_write(psa_handle_t msg_handle, uint32_t outvec_idx,
                                 const void *buffer, size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                            partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
        msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE));
#endif

    /*
     * Copy the service buffer to the client outvec. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    spm_memcpy((char *)msg->outvec[outvec_idx].base +
               msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}

int32_t tfm_spm_partition_psa_reply(psa_handle_t msg_handle,
                                    psa_status_t status)
{
    struct service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * RoT Service information is needed in this function; it is stored in the
     * message body structure. Only two parameters (handle and status) are
     * passed to this function, so retrieving the service from the message is
     * the simplest approach.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are handled by this function: CONNECT, REQUEST
     * and DISCONNECT. Each type is processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to a PSA_IPC_CONNECT message. The connect handle is returned
         * if the input status is PSA_SUCCESS. Other return values are based
         * on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* The service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {

#if PSA_FRAMEWORK_HAS_MM_IOVEC

            /*
             * If the unmap function is not called for an input/output vector
             * that has been mapped, the framework removes the mapping.
             */
            int i;

            for (i = 0; i < PSA_MAX_IOVEC * 2; i++) {
                if (IOVEC_IS_MAPPED(msg, i) && (!IOVEC_IS_UNMAPPED(msg, i))) {
                    SET_IOVEC_UNMAPPED(msg, i);
                    /*
                     * Any output vectors that are still mapped will report
                     * that zero bytes have been written.
                     */
                    if (i >= OUTVEC_IDX_BASE) {
                        msg->outvec[i - OUTVEC_IDX_BASE].len = 0;
                    }
                }
            }

#endif
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
            if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
                tfm_spm_free_conn_handle(service, conn_handle);
            }
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the SPM
         * must panic the Secure Partition in response to a PROGRAMMER ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    /*
     * TODO: This can be optimized further by moving the critical section
     * protection to the mailbox. The implementation also needs to be checked
     * when a secure context is involved.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    ret = backend_instance.replying(msg, ret);
    CRITICAL_SECTION_LEAVE(cs_assert);

    return ret;
}

void tfm_spm_partition_psa_notify(int32_t partition_id)
{
    struct partition_t *p_pt = tfm_spm_get_partition_by_id(partition_id);

    spm_assert_signal(p_pt, PSA_DOORBELL);
}

void tfm_spm_partition_psa_clear(void)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->signals_asserted & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~PSA_DOORBELL;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

void tfm_spm_partition_psa_eoi(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info = NULL;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (!irq_info) {
        tfm_core_panic();
    }

    if (irq_info->flih_func) {
        /* This API is for SLIH IRQs only */
        tfm_core_panic();
    }

    /* It is a fatal error if the passed signal is not currently asserted */
    if ((partition->signals_asserted & irq_signal) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);

    tfm_hal_irq_clear_pending(irq_info->source);
    tfm_hal_irq_enable(irq_info->source);
}

void tfm_spm_partition_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * secure partition panics.
     */
    tfm_hal_system_reset();
}

void tfm_spm_partition_psa_irq_enable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_enable(irq_info->source);
}

psa_irq_status_t tfm_spm_partition_psa_irq_disable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_disable(irq_info->source);

    return 1;
}

void tfm_spm_partition_psa_reset_signal(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info;
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    if (!irq_info->flih_func) {
        /* This API is for FLIH IRQs only */
        tfm_core_panic();
    }

    if ((partition->signals_asserted & irq_signal) == 0) {
        /* The signal is not asserted */
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

#if PSA_FRAMEWORK_HAS_MM_IOVEC

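/*
 * Illustrative service-side MM-IOVEC usage (assumes MM-IOVEC is enabled for
 * the service and msg refers to a request message):
 *
 *     const uint8_t *in = psa_map_invec(msg.handle, 0);
 *     ...read directly from in, up to msg.in_size[0] bytes...
 *     psa_unmap_invec(msg.handle, 0);
 */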
const void *tfm_spm_partition_psa_map_invec(psa_handle_t msg_handle,
                                            uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                            partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the input vector has length zero. */
    if (msg->msg.in_size[invec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been accessed
     * using psa_read() or psa_skip().
     */
    if (IOVEC_IS_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the memory reference for the wrap input vector is
     * invalid or not readable.
     */
    if (tfm_memory_check(msg->invec[invec_idx].base, msg->invec[invec_idx].len,
        false, TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    SET_IOVEC_MAPPED(msg, (invec_idx + INVEC_IDX_BASE));

    return msg->invec[invec_idx].base;
}

void tfm_spm_partition_psa_unmap_invec(psa_handle_t msg_handle,
                                       uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has not been mapped by a call to
     * psa_map_invec().
     */
    if (!IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been unmapped by a
     * call to psa_unmap_invec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE));
}

void *tfm_spm_partition_psa_map_outvec(psa_handle_t msg_handle,
                                       uint32_t outvec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                            partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the output vector has length zero. */
    if (msg->msg.out_size[outvec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been accessed
     * using psa_write().
     */
    if (IOVEC_IS_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector is invalid or not read-write.
     */
    if (tfm_memory_check(msg->outvec[outvec_idx].base,
        msg->outvec[outvec_idx].len, false,
        TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }
    SET_IOVEC_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    return msg->outvec[outvec_idx].base;
}

void tfm_spm_partition_psa_unmap_outvec(psa_handle_t msg_handle,
                                        uint32_t outvec_idx, size_t len)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if len is greater than the output vector size.
     */
    if (len > msg->msg.out_size[outvec_idx]) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has not been mapped by a call
     * to psa_map_outvec().
     */
    if (!IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been unmapped by a
     * call to psa_unmap_outvec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len = len;
}

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */