/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "bitops.h"
#include "critical_section.h"
#include "psa/lifecycle.h"
#include "psa/service.h"
#include "spm_ipc.h"
#include "tfm_arch.h"
#include "tfm_core_utils.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/interrupt_defs.h"
#include "ffm/psa_api.h"
#include "utilities.h"
#include "ffm/backend.h"
#include "ffm/spm_error_base.h"
#include "tfm_rpc.h"
#include "tfm_spm_hal.h"
#include "tfm_hal_interrupt.h"
#include "tfm_hal_platform.h"
#include "tfm_psa_call_pack.h"

#define GET_STATELESS_SERVICE(index)    (stateless_services_ref_tbl[index])
extern struct service_t *stateless_services_ref_tbl[];

#if PSA_FRAMEWORK_HAS_MM_IOVEC

/*
 * The MM-IOVEC status
 * The max total number of invec and outvec is 8.
 * Each invec/outvec takes 4 bits, 32 bits in total.
 *
 * The encoding format of the MM-IOVEC status:
 *--------------------------------------------------------------
 *|  Bit   |  31 - 28  |  27 - 24  | ... |  7 - 4   |  3 - 0   |
 *--------------------------------------------------------------
 *| Vector | outvec[3] | outvec[2] | ... | invec[1] | invec[0] |
 *--------------------------------------------------------------
 *
 * Take invec[0] as an example:
 *
 * bit 0: whether invec[0] has been mapped.
 * bit 1: whether invec[0] has been unmapped.
 * bit 2: whether invec[0] has been accessed using psa_read(), psa_skip() or
 *        psa_write().
 * bit 3: reserved for invec[0].
 */
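
/*
 * Worked example (illustrative only, values assumed for clarity): if invec[0]
 * has been mapped and then accessed, and invec[1] has only been mapped, the
 * status word reads 0x00000015 - invec[0] nibble 0b0101 (mapped | accessed),
 * invec[1] nibble 0b0001 (mapped), all other nibbles zero.
 */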

#define IOVEC_STATUS_BITS       4    /* Each vector occupies 4 bits. */
#define OUTVEC_IDX_BASE         4    /*
                                      * Base index of outvec.
                                      * There are four invecs in front of
                                      * outvec.
                                      */
#define INVEC_IDX_BASE          0    /* Base index of invec. */

#define IOVEC_MAPPED_BIT        (1U << 0)
#define IOVEC_UNMAPPED_BIT      (1U << 1)
#define IOVEC_ACCESSED_BIT      (1U << 2)

#define IOVEC_IS_MAPPED(msg, iovec_idx)                                  \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &      \
     IOVEC_MAPPED_BIT)
#define IOVEC_IS_UNMAPPED(msg, iovec_idx)                                \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &      \
     IOVEC_UNMAPPED_BIT)
#define IOVEC_IS_ACCESSED(msg, iovec_idx)                                \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &      \
     IOVEC_ACCESSED_BIT)
#define SET_IOVEC_MAPPED(msg, iovec_idx)                                 \
    (((msg)->iovec_status) |= (IOVEC_MAPPED_BIT <<                       \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_UNMAPPED(msg, iovec_idx)                               \
    (((msg)->iovec_status) |= (IOVEC_UNMAPPED_BIT <<                     \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_ACCESSED(msg, iovec_idx)                               \
    (((msg)->iovec_status) |= (IOVEC_ACCESSED_BIT <<                     \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/* PSA Client API function body */

uint32_t tfm_spm_client_psa_framework_version(void)
{
    return PSA_FRAMEWORK_VERSION;
}

uint32_t tfm_spm_client_psa_version(uint32_t sid)
{
    struct service_t *service;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It should return PSA_VERSION_NONE if the RoT Service is not
     * implemented.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_VERSION_NONE;
    }

    /*
     * It should return PSA_VERSION_NONE if the caller is not authorized
     * to access the RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_VERSION_NONE;
    }

    return service->p_ldinf->version;
}

psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *connect_handle;
    int32_t client_id;
    psa_handle_t handle;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It is a PROGRAMMER ERROR if the RoT Service does not exist on the
     * platform.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    /* It is a PROGRAMMER ERROR if connecting to a stateless service. */
    if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /*
     * It is a PROGRAMMER ERROR if the caller is not authorized to access the
     * RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    /*
     * It is a PROGRAMMER ERROR if the version of the RoT Service requested is
     * not supported on the platform.
     */
    if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /*
     * Create the connection handle here since it is still possible to return
     * an error code to the client if creation fails.
     */
    connect_handle = tfm_spm_create_conn_handle(service, client_id);
    if (!connect_handle) {
        return PSA_ERROR_CONNECTION_BUSY;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);
    if (!msg) {
        /* Not enough resources to create the message */
        return PSA_ERROR_CONNECTION_BUSY;
    }

    handle = tfm_spm_to_user_handle(connect_handle);
    /* No input or output needed for connect message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_CONNECT,
                     client_id, NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_call(psa_handle_t handle,
                                     uint32_t ctrl_param,
                                     const psa_invec *inptr,
                                     psa_outvec *outptr)
{
    psa_invec invecs[PSA_MAX_IOVEC];
    psa_outvec outvecs[PSA_MAX_IOVEC];
    struct tfm_conn_handle_t *conn_handle;
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    int i, j;
    int32_t client_id;
    uint32_t sid, version, index;
    uint32_t privileged;
    bool ns_caller = tfm_spm_is_ns_caller();
    int32_t type = (int32_t)(int16_t)((ctrl_param & TYPE_MASK) >> TYPE_OFFSET);
    size_t in_num = (size_t)((ctrl_param & IN_LEN_MASK) >> IN_LEN_OFFSET);
    size_t out_num = (size_t)((ctrl_param & OUT_LEN_MASK) >> OUT_LEN_OFFSET);
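
    /*
     * ctrl_param packs the request type and the invec/outvec counts into a
     * single word; the TYPE/IN_LEN/OUT_LEN masks and offsets are expected to
     * be provided by tfm_psa_call_pack.h (included above), which defines the
     * exact bit layout.
     */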

    /* The request type must be zero or positive. */
    if (type < 0) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /* It is a PROGRAMMER ERROR if in_len + out_len > PSA_MAX_IOVEC. */
    if ((in_num > PSA_MAX_IOVEC) ||
        (out_num > PSA_MAX_IOVEC) ||
        (in_num + out_num > PSA_MAX_IOVEC)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /* Allocate space from the handle pool for a static handle. */
    if (IS_STATIC_HANDLE(handle)) {
        index = GET_INDEX_FROM_STATIC_HANDLE(handle);

        if (!IS_VALID_STATIC_HANDLE_IDX(index)) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        service = GET_STATELESS_SERVICE(index);
        if (!service) {
            tfm_core_panic();
        }

        sid = service->p_ldinf->sid;

        /*
         * It is a PROGRAMMER ERROR if the caller is not authorized to access
         * the RoT Service.
         */
        if (tfm_spm_check_authorization(sid, service, ns_caller)
            != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
        }

        version = GET_VERSION_FROM_STATIC_HANDLE(handle);

        if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        conn_handle = tfm_spm_create_conn_handle(service, client_id);

        if (!conn_handle) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_BUSY);
        }

        conn_handle->rhandle = NULL;
        handle = tfm_spm_to_user_handle(conn_handle);
    } else {
        conn_handle = tfm_spm_to_handle_instance(handle);

        /* It is a PROGRAMMER ERROR if an invalid handle was passed. */
        if (tfm_spm_validate_conn_handle(conn_handle, client_id)
            != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        /*
         * It is a PROGRAMMER ERROR if the connection is currently
         * handling a request.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        /*
         * Return PSA_ERROR_PROGRAMMER_ERROR immediately if the connection
         * has been terminated by the RoT Service.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = conn_handle->internal_msg.service;
    }

    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    privileged = tfm_spm_get_caller_privilege_mode();

    /*
     * Read client invecs from the wrap input vector. It is a PROGRAMMER ERROR
     * if the memory reference for the wrap input vector is invalid or not
     * readable.
     */
    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /*
     * Read client outvecs from the wrap output vector; the actual lengths are
     * updated later. It is a PROGRAMMER ERROR if the memory reference for the
     * wrap output vector is invalid or not read-write.
     */
    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    spm_memset(invecs, 0, sizeof(invecs));
    spm_memset(outvecs, 0, sizeof(outvecs));

    /* Copy the addresses out to avoid TOCTOU attacks. */
    spm_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
    spm_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));

    /*
     * For each client input vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not readable.
     */
    for (i = 0; i < in_num; i++) {
        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RO,
                             privileged) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }
    }

    /*
     * Clients must never overlap input parameters because of the risk of a
     * double-fetch inconsistency.
     * Overflow is checked in the tfm_memory_check functions.
     */
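    /*
     * For example (hypothetical addresses, for illustration only):
     * invec[0] = {0x20001000, 0x100} and invec[1] = {0x20001080, 0x40}
     * overlap and would be rejected, while invec[1] = {0x20001100, 0x40}
     * would be accepted.
     */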
    for (i = 0; i + 1 < in_num; i++) {
        for (j = i + 1; j < in_num; j++) {
            if (!((char *) invecs[j].base + invecs[j].len <=
                  (char *) invecs[i].base ||
                  (char *) invecs[j].base >=
                  (char *) invecs[i].base + invecs[i].len)) {
                TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
            }
        }
    }

    /*
     * For each client output vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not read-write.
     */
    for (i = 0; i < out_num; i++) {
        if (tfm_memory_check(outvecs[i].base, outvecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RW,
                             privileged) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }
    }

    /*
     * FixMe: Need to check whether the message is unrecognized by the RoT
     * Service or incorrectly formatted.
     */
    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    tfm_spm_fill_msg(msg, service, handle, type, client_id,
                     invecs, in_num, outvecs, out_num, outptr);

    return backend_instance.messaging(service, msg);
}

void tfm_spm_client_psa_close(psa_handle_t handle)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *conn_handle;
    int32_t client_id;
    bool ns_caller = tfm_spm_is_ns_caller();

    /* It has no effect if called with the NULL handle */
    if (handle == PSA_NULL_HANDLE) {
        return;
    }

    /* It is a PROGRAMMER ERROR if called with a stateless handle. */
    if (IS_STATIC_HANDLE(handle)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    conn_handle = tfm_spm_to_handle_instance(handle);
    /*
     * It is a PROGRAMMER ERROR if an invalid handle was provided that is not
     * the null handle.
     */
    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    service = conn_handle->internal_msg.service;
    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the connection is currently handling a
     * request.
     */
    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    /* No input or output needed for a close message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_DISCONNECT, client_id,
                     NULL, 0, NULL, 0, NULL);

    (void)backend_instance.messaging(service, msg);
}

/* PSA Partition API function body */

psa_signal_t tfm_spm_partition_psa_wait(psa_signal_t signal_mask,
                                        uint32_t timeout)
{
    struct partition_t *partition = NULL;

    /*
     * Timeout[30:0] are reserved for future use.
     * The SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->signals_allowed & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * thrd_wait_on() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily stored
     * in the runtime context. Once new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
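    /*
     * For example, a partition calling psa_wait(PSA_DOORBELL | MY_IRQ_SIGNAL,
     * PSA_BLOCK) (MY_IRQ_SIGNAL being a hypothetical assigned signal) sleeps
     * here until either signal is asserted, e.g. by spm_assert_signal().
     */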
    if (timeout == PSA_BLOCK &&
        (partition->signals_asserted & signal_mask) == 0) {
        partition->signals_waiting = signal_mask;
        thrd_wait_on(&partition->waitobj, CURRENT_THREAD);
    }

    return partition->signals_asserted & signal_mask;
}

psa_status_t tfm_spm_partition_psa_get(psa_signal_t signal, psa_msg_t *msg)
{
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct partition_t *partition = NULL;
    uint32_t privileged;

    /*
     * Only one message can be retrieved per psa_get() call. It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                                  partition->p_ldinf->flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service
     * signal is returned by psa_wait().
     */
    if (partition->signals_asserted == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->signals_asserted & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the message by signal from the partition. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * Service.
     */
    tmp_msg = tfm_spm_get_msg_by_signal(partition, signal);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TO_CONTAINER(tmp_msg,
                  struct tfm_conn_handle_t,
                  internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    spm_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    return PSA_SUCCESS;
}

void tfm_spm_partition_psa_set_rhandle(psa_handle_t msg_handle, void *rhandle)
{
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /* It is a PROGRAMMER ERROR if a stateless service sets rhandle. */
    if (SERVICE_IS_STATELESS(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store the reverse handle for subsequent client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

size_t tfm_spm_partition_psa_read(psa_handle_t msg_handle, uint32_t invec_idx,
                                  void *buffer, size_t num_bytes)
{
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                  partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
                        msg->msg.in_size[invec_idx] : num_bytes;

    spm_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}

size_t tfm_spm_partition_psa_skip(psa_handle_t msg_handle, uint32_t invec_idx,
                                  size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * If num_bytes is greater than the remaining size of the input vector,
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_partition_psa_write(psa_handle_t msg_handle, uint32_t outvec_idx,
                                 const void *buffer, size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                  partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
                    msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE));
#endif

    /*
     * Copy the service buffer to the client outvec. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    spm_memcpy((char *)msg->outvec[outvec_idx].base +
               msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}

int32_t tfm_spm_partition_psa_reply(psa_handle_t msg_handle,
                                    psa_status_t status)
{
    struct service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * The RoT Service information needed in this function is stored in the
     * message body structure, since only two parameters are passed in:
     * the handle and the status.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are passed to this function: CONNECTION, REQUEST
     * and DISCONNECTION. Each type needs to be processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to a PSA_IPC_CONNECT message. The connect handle is returned
         * if the input status is PSA_SUCCESS. Other return values are based
         * on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* The service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {

#if PSA_FRAMEWORK_HAS_MM_IOVEC

            /*
             * If the unmap function has not been called for an input/output
             * vector that was mapped, the framework removes the mapping.
             */
            int i;

            for (i = 0; i < PSA_MAX_IOVEC * 2; i++) {
                if (IOVEC_IS_MAPPED(msg, i) && (!IOVEC_IS_UNMAPPED(msg, i))) {
                    SET_IOVEC_UNMAPPED(msg, i);
                    /*
                     * Any output vectors that are still mapped report that
                     * zero bytes have been written.
                     */
                    if (i >= OUTVEC_IDX_BASE) {
                        msg->outvec[i - OUTVEC_IDX_BASE].len = 0;
                    }
                }
            }

#endif
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
            if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
                tfm_spm_free_conn_handle(service, conn_handle);
            }
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the
         * SPM must panic the Secure Partition in response to a PROGRAMMER
         * ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    return backend_instance.replying(msg, ret);
}

void tfm_spm_partition_psa_notify(int32_t partition_id)
{
    struct partition_t *p_pt = tfm_spm_get_partition_by_id(partition_id);

    spm_assert_signal(p_pt, PSA_DOORBELL);
}

void tfm_spm_partition_psa_clear(void)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->signals_asserted & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~PSA_DOORBELL;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

void tfm_spm_partition_psa_eoi(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info = NULL;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (!irq_info) {
        tfm_core_panic();
    }

    if (irq_info->flih_func) {
        /* This API is for SLIH IRQs only */
        tfm_core_panic();
    }

    /* It is a fatal error if the passed signal is not currently asserted */
    if ((partition->signals_asserted & irq_signal) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);

    tfm_hal_irq_clear_pending(irq_info->source);
    tfm_hal_irq_enable(irq_info->source);
}

void tfm_spm_partition_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * Secure Partition panics.
     */
    tfm_hal_system_reset();
}

void tfm_spm_partition_irq_enable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_enable(irq_info->source);
}

psa_irq_status_t tfm_spm_partition_irq_disable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_disable(irq_info->source);

    return 1;
}

void tfm_spm_partition_psa_reset_signal(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info;
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    if (!irq_info->flih_func) {
        /* This API is for FLIH IRQs only */
        tfm_core_panic();
    }

    if ((partition->signals_asserted & irq_signal) == 0) {
        /* The signal is not asserted */
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

#if PSA_FRAMEWORK_HAS_MM_IOVEC

const void *tfm_spm_partition_psa_map_invec(psa_handle_t msg_handle,
                                            uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                  partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the input vector has length zero. */
    if (msg->msg.in_size[invec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been accessed
     * using psa_read() or psa_skip().
     */
    if (IOVEC_IS_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the memory reference for the input vector is
     * invalid or not readable.
     */
    if (tfm_memory_check(msg->invec[invec_idx].base, msg->invec[invec_idx].len,
                         false, TFM_MEMORY_ACCESS_RO,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    SET_IOVEC_MAPPED(msg, (invec_idx + INVEC_IDX_BASE));

    return msg->invec[invec_idx].base;
}

void tfm_spm_partition_psa_unmap_invec(psa_handle_t msg_handle,
                                       uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has not been mapped by a call
     * to psa_map_invec().
     */
    if (!IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been unmapped by a
     * call to psa_unmap_invec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE));
}

void *tfm_spm_partition_psa_map_outvec(psa_handle_t msg_handle,
                                       uint32_t outvec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                  partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the output vector has length zero. */
    if (msg->msg.out_size[outvec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been accessed
     * using psa_write().
     */
    if (IOVEC_IS_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector is invalid or not read-write.
     */
    if (tfm_memory_check(msg->outvec[outvec_idx].base,
                         msg->outvec[outvec_idx].len, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }
    SET_IOVEC_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    return msg->outvec[outvec_idx].base;
}

void tfm_spm_partition_psa_unmap_outvec(psa_handle_t msg_handle,
                                        uint32_t outvec_idx, size_t len)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if len is greater than the output vector size.
     */
    if (len > msg->msg.out_size[outvec_idx]) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has not been mapped by a call
     * to psa_map_outvec().
     */
    if (!IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been unmapped by a
     * call to psa_unmap_outvec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len = len;
}

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */