/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "bitops.h"
#include "critical_section.h"
#include "psa/lifecycle.h"
#include "psa/service.h"
#include "spm_ipc.h"
#include "tfm_arch.h"
#include "tfm_core_utils.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/interrupt_defs.h"
#include "ffm/psa_api.h"
#include "utilities.h"
#include "ffm/backend.h"
#include "ffm/spm_error_base.h"
#include "tfm_rpc.h"
#include "tfm_spm_hal.h"
#include "tfm_hal_interrupt.h"
#include "tfm_hal_platform.h"
#include "tfm_psa_call_pack.h"

#define GET_STATELESS_SERVICE(index)    (stateless_services_ref_tbl[index])
extern struct service_t *stateless_services_ref_tbl[];

#if PSA_FRAMEWORK_HAS_MM_IOVEC

/*
 * The MM-IOVEC status
 * The max total number of invecs and outvecs is 8.
 * Each invec/outvec takes 4 bits, 32 bits in total.
 *
 * The encoding format of the MM-IOVEC status:
 *--------------------------------------------------------------
 *| Bit    | 31 - 28   | 27 - 24   | ... | 7 - 4    | 3 - 0    |
 *--------------------------------------------------------------
 *| Vector | outvec[3] | outvec[2] | ... | invec[1] | invec[0] |
 *--------------------------------------------------------------
 *
 * Take invec[0] as an example:
 *
 * bit 0: whether invec[0] has been mapped.
 * bit 1: whether invec[0] has been unmapped.
 * bit 2: whether invec[0] has been accessed using psa_read(), psa_skip() or
 *        psa_write().
 * bit 3: reserved for invec[0].
 */

#define IOVEC_STATUS_BITS        4    /* Each vector occupies 4 bits. */
#define OUTVEC_IDX_BASE          4    /*
                                       * Base index of outvec.
                                       * There are four invecs in front of
                                       * outvec.
                                       */
#define INVEC_IDX_BASE           0    /* Base index of invec. */

#define IOVEC_MAPPED_BIT         (1U << 0)
#define IOVEC_UNMAPPED_BIT       (1U << 1)
#define IOVEC_ACCESSED_BIT       (1U << 2)

#define IOVEC_IS_MAPPED(msg, iovec_idx)                                  \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &      \
     IOVEC_MAPPED_BIT)
#define IOVEC_IS_UNMAPPED(msg, iovec_idx)                                \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &      \
     IOVEC_UNMAPPED_BIT)
#define IOVEC_IS_ACCESSED(msg, iovec_idx)                                \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &      \
     IOVEC_ACCESSED_BIT)
#define SET_IOVEC_MAPPED(msg, iovec_idx)                                 \
    (((msg)->iovec_status) |= (IOVEC_MAPPED_BIT <<                       \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_UNMAPPED(msg, iovec_idx)                               \
    (((msg)->iovec_status) |= (IOVEC_UNMAPPED_BIT <<                     \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_ACCESSED(msg, iovec_idx)                               \
    (((msg)->iovec_status) |= (IOVEC_ACCESSED_BIT <<                     \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
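
/*
 * Worked example of the encoding above (illustrative only): after invec[1]
 * has been mapped and then unmapped, its nibble (bits 7 - 4) holds
 * IOVEC_MAPPED_BIT | IOVEC_UNMAPPED_BIT = 0x3, so iovec_status reads
 * 0x00000030. IOVEC_IS_MAPPED(msg, 1) and IOVEC_IS_UNMAPPED(msg, 1) then
 * evaluate to non-zero values, while IOVEC_IS_ACCESSED(msg, 1) evaluates to
 * zero.
 */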

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: PSA_LIFECYCLE_UNKNOWN is returned to the caller directly for
     * now. Reporting the real lifecycle state will be implemented in the
     * future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/* PSA Client API function body */

uint32_t tfm_spm_client_psa_framework_version(void)
{
    return PSA_FRAMEWORK_VERSION;
}

uint32_t tfm_spm_client_psa_version(uint32_t sid)
{
    struct service_t *service;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It should return PSA_VERSION_NONE if the RoT Service is not
     * implemented.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_VERSION_NONE;
    }

    /*
     * It should return PSA_VERSION_NONE if the caller is not authorized
     * to access the RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_VERSION_NONE;
    }

    return service->p_ldinf->version;
}

psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *connect_handle;
    int32_t client_id;
    psa_handle_t handle;
    bool ns_caller = tfm_spm_is_ns_caller();
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /*
     * It is a PROGRAMMER ERROR if the RoT Service does not exist on the
     * platform.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    /* It is a PROGRAMMER ERROR if connecting to a stateless service. */
    if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /*
     * It is a PROGRAMMER ERROR if the caller is not authorized to access the
     * RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    /*
     * It is a PROGRAMMER ERROR if the version of the RoT Service requested is
     * not supported on the platform.
     */
    if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /*
     * Create the connection handle here because the error code can then be
     * returned to the client if creation fails.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    connect_handle = tfm_spm_create_conn_handle(service, client_id);
    CRITICAL_SECTION_LEAVE(cs_assert);
    if (!connect_handle) {
        return PSA_ERROR_CONNECTION_BUSY;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);
    if (!msg) {
        /* Not enough resources to create the message */
        return PSA_ERROR_CONNECTION_BUSY;
    }

    handle = tfm_spm_to_user_handle(connect_handle);
    /* No input or output is needed for a connect message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_CONNECT,
                     client_id, NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_call(psa_handle_t handle,
                                     uint32_t ctrl_param,
                                     const psa_invec *inptr,
                                     psa_outvec *outptr)
{
    psa_invec invecs[PSA_MAX_IOVEC];
    psa_outvec outvecs[PSA_MAX_IOVEC];
    struct tfm_conn_handle_t *conn_handle;
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    int i, j;
    int32_t client_id;
    uint32_t sid, version, index;
    uint32_t privileged;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    bool ns_caller = tfm_spm_is_ns_caller();
    int32_t type = (int32_t)(int16_t)((ctrl_param & TYPE_MASK) >> TYPE_OFFSET);
    size_t in_num = (size_t)((ctrl_param & IN_LEN_MASK) >> IN_LEN_OFFSET);
    size_t out_num = (size_t)((ctrl_param & OUT_LEN_MASK) >> OUT_LEN_OFFSET);
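
    /*
     * Note on ctrl_param (a sketch, assuming the packing defined in
     * tfm_psa_call_pack.h): the call type and the invec/outvec counts are
     * packed into a single 32-bit word on the client side so that only one
     * extra parameter crosses the SPM boundary. For example, a request of
     * type PSA_IPC_CALL with two invecs and one outvec unpacks here as
     * type == PSA_IPC_CALL, in_num == 2 and out_num == 1.
     */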

    /* The request type must be zero or positive. */
    if (type < 0) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /* It is a PROGRAMMER ERROR if in_len + out_len > PSA_MAX_IOVEC. */
    if ((in_num > PSA_MAX_IOVEC) ||
        (out_num > PSA_MAX_IOVEC) ||
        (in_num + out_num > PSA_MAX_IOVEC)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /* Allocate space from the handle pool for a static handle. */
    if (IS_STATIC_HANDLE(handle)) {
        index = GET_INDEX_FROM_STATIC_HANDLE(handle);

        if (!IS_VALID_STATIC_HANDLE_IDX(index)) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        service = GET_STATELESS_SERVICE(index);
        if (!service) {
            tfm_core_panic();
        }

        sid = service->p_ldinf->sid;

        /*
         * It is a PROGRAMMER ERROR if the caller is not authorized to access
         * the RoT Service.
         */
        if (tfm_spm_check_authorization(sid, service, ns_caller)
            != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
        }

        version = GET_VERSION_FROM_STATIC_HANDLE(handle);

        if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        CRITICAL_SECTION_ENTER(cs_assert);
        conn_handle = tfm_spm_create_conn_handle(service, client_id);
        CRITICAL_SECTION_LEAVE(cs_assert);

        if (!conn_handle) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_BUSY);
        }

        conn_handle->rhandle = NULL;
        handle = tfm_spm_to_user_handle(conn_handle);
    } else {
        conn_handle = tfm_spm_to_handle_instance(handle);

        /* It is a PROGRAMMER ERROR if an invalid handle was passed. */
        if (tfm_spm_validate_conn_handle(conn_handle, client_id)
            != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        /*
         * It is a PROGRAMMER ERROR if the connection is currently
         * handling a request.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        /*
         * Return PSA_ERROR_PROGRAMMER_ERROR immediately if the connection
         * has been terminated by the RoT Service.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = conn_handle->internal_msg.service;
    }

    if (!service) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        tfm_core_panic();
    }

    privileged = tfm_spm_get_caller_privilege_mode();

    /*
     * Read client invecs from the wrap input vector. It is a PROGRAMMER ERROR
     * if the memory reference for the wrap input vector is invalid or not
     * readable.
     */
    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
        TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /*
     * Read client outvecs from the wrap output vector; the actual lengths are
     * updated later. It is a PROGRAMMER ERROR if the memory reference for the
     * wrap output vector is invalid or not read-write.
     */
    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
        TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    spm_memset(invecs, 0, sizeof(invecs));
    spm_memset(outvecs, 0, sizeof(outvecs));

    /* Copy the addresses out to avoid TOCTOU attacks. */
    spm_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
    spm_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));

    /*
     * For each client input vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not readable.
     */
    for (i = 0; i < in_num; i++) {
        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
            TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }
    }

    /*
     * Clients must never overlap input parameters because of the risk of a
     * double-fetch inconsistency.
     * Overflow is checked in the tfm_memory_check functions.
     */
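    /*
     * Two vectors [base, base + len) are treated as disjoint only if one ends
     * at or before the start of the other; any other arrangement is rejected
     * as an overlap below.
     */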
    for (i = 0; i + 1 < in_num; i++) {
        for (j = i + 1; j < in_num; j++) {
            if (!((char *) invecs[j].base + invecs[j].len <=
                  (char *) invecs[i].base ||
                  (char *) invecs[j].base >=
                  (char *) invecs[i].base + invecs[i].len)) {
                TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
            }
        }
    }

    /*
     * For each client output vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not read-write.
     */
    for (i = 0; i < out_num; i++) {
        if (tfm_memory_check(outvecs[i].base, outvecs[i].len,
            ns_caller, TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }
    }

    /*
     * FixMe: Need to check if the message is unrecognized by the RoT
     * Service or incorrectly formatted.
     */
    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    tfm_spm_fill_msg(msg, service, handle, type, client_id,
                     invecs, in_num, outvecs, out_num, outptr);

    return backend_instance.messaging(service, msg);
}

void tfm_spm_client_psa_close(psa_handle_t handle)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *conn_handle;
    int32_t client_id;
    bool ns_caller = tfm_spm_is_ns_caller();

    /* It will have no effect if called with the NULL handle */
    if (handle == PSA_NULL_HANDLE) {
        return;
    }

    /* It is a PROGRAMMER ERROR if called with a stateless handle. */
    if (IS_STATIC_HANDLE(handle)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    conn_handle = tfm_spm_to_handle_instance(handle);
    /*
     * It is a PROGRAMMER ERROR if an invalid handle was provided that is not
     * the null handle.
     */
    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    service = conn_handle->internal_msg.service;
    if (!service) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        tfm_core_panic();
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the connection is currently handling a
     * request.
     */
    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    /* No input or output needed for close message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_DISCONNECT, client_id,
                     NULL, 0, NULL, 0, NULL);

    (void)backend_instance.messaging(service, msg);
}

/* PSA Partition API function body */

psa_signal_t tfm_spm_partition_psa_wait(psa_signal_t signal_mask,
                                        uint32_t timeout)
{
    struct partition_t *partition = NULL;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->signals_allowed & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * thrd_wait_on() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily stored
     * in the runtime context. Once new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->signals_asserted & signal_mask) == 0) {
        partition->signals_waiting = signal_mask;
        thrd_wait_on(&partition->waitobj, CURRENT_THREAD);
    }

    return partition->signals_asserted & signal_mask;
}
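
/*
 * Note (descriptive, derived from the logic above): with PSA_BLOCK the caller
 * is suspended until at least one signal in signal_mask becomes asserted,
 * whereas with PSA_POLL the currently asserted subset, which may be zero, is
 * returned immediately without blocking.
 */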

psa_status_t tfm_spm_partition_psa_get(psa_signal_t signal, psa_msg_t *msg)
{
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct partition_t *partition = NULL;
    uint32_t privileged;

    /*
     * Only one message can be retrieved for each psa_get() call. It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                                   partition->p_ldinf->flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
        privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service
     * signal is returned by psa_wait().
     */
    if (partition->signals_asserted == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->signals_asserted & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the message by signal from the partition. If this fails, the input
     * signal does not correspond to an RoT Service and
     * PSA_ERROR_DOES_NOT_EXIST is returned.
     */
    tmp_msg = tfm_spm_get_msg_by_signal(partition, signal);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TO_CONTAINER(tmp_msg,
                  struct tfm_conn_handle_t,
                  internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    spm_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    return PSA_SUCCESS;
}
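
/*
 * Typical partition-side usage of the two functions above (an illustrative
 * sketch only, written against the standard client-facing names that these
 * SPM bodies back; MY_SERVICE_SIGNAL is a hypothetical signal):
 *
 *     psa_msg_t msg;
 *     psa_signal_t signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
 *
 *     if (signals & MY_SERVICE_SIGNAL) {
 *         if (psa_get(MY_SERVICE_SIGNAL, &msg) == PSA_SUCCESS) {
 *             ... handle msg.type, then call psa_reply(msg.handle, status) ...
 *         }
 *     }
 */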

void tfm_spm_partition_psa_set_rhandle(psa_handle_t msg_handle, void *rhandle)
{
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /* It is a PROGRAMMER ERROR if a stateless service sets rhandle. */
    if (SERVICE_IS_STATELESS(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store the reverse handle for subsequent client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

size_t tfm_spm_partition_psa_read(psa_handle_t msg_handle, uint32_t invec_idx,
                                  void *buffer, size_t num_bytes)
{
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                   partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
                        msg->msg.in_size[invec_idx] : num_bytes;

    spm_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some data remaining */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}

size_t tfm_spm_partition_psa_skip(psa_handle_t msg_handle, uint32_t invec_idx,
                                  size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * If num_bytes is greater than the remaining size of the input vector then
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some data remaining */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_partition_psa_write(psa_handle_t msg_handle, uint32_t outvec_idx,
                                 const void *buffer, size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                   partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
        msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE));
#endif

    /*
     * Copy the service buffer to the client outvec. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
        TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    spm_memcpy((char *)msg->outvec[outvec_idx].base +
               msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}

int32_t tfm_spm_partition_psa_reply(psa_handle_t msg_handle,
                                    psa_status_t status)
{
    struct service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * The RoT Service information needed in this function is stored in the
     * message body structure, because only two parameters are passed in:
     * the handle and the status.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are handled in this function: CONNECT, REQUEST
     * and DISCONNECT. Each type is processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to a PSA_IPC_CONNECT message. The connection handle is
         * returned if the input status is PSA_SUCCESS. Other return values
         * are based on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* The service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {

#if PSA_FRAMEWORK_HAS_MM_IOVEC

            /*
             * If the unmap function is not called for an input/output vector
             * that has been mapped, the framework removes the mapping here.
             */
            int i;

            for (i = 0; i < PSA_MAX_IOVEC * 2; i++) {
                if (IOVEC_IS_MAPPED(msg, i) && (!IOVEC_IS_UNMAPPED(msg, i))) {
                    SET_IOVEC_UNMAPPED(msg, i);
                    /*
                     * Any output vectors that are still mapped will report
                     * that zero bytes have been written.
                     */
                    if (i >= OUTVEC_IDX_BASE) {
                        msg->outvec[i - OUTVEC_IDX_BASE].len = 0;
                    }
                }
            }

#endif
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
            if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
                tfm_spm_free_conn_handle(service, conn_handle);
            }
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the SPM
         * must panic the Secure Partition in response to a PROGRAMMER ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    /*
     * TODO: This can be optimized further by moving the critical section
     * protection into the mailbox. The implementation also needs to be
     * checked when a secure context is involved.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    ret = backend_instance.replying(msg, ret);
    CRITICAL_SECTION_LEAVE(cs_assert);

    return ret;
}
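
/*
 * Note (descriptive, derived from this file): a connection handle moves
 * between TFM_HANDLE_STATUS_IDLE, TFM_HANDLE_STATUS_ACTIVE and
 * TFM_HANDLE_STATUS_CONNECT_ERROR. tfm_spm_partition_psa_get() marks the
 * handle ACTIVE while its message is being processed,
 * tfm_spm_client_psa_call() rejects handles that are ACTIVE or in
 * CONNECT_ERROR, and the reply path above returns the handle to IDLE, or to
 * CONNECT_ERROR when a non-secure client caused a PROGRAMMER ERROR.
 */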

void tfm_spm_partition_psa_notify(int32_t partition_id)
{
    struct partition_t *p_pt = tfm_spm_get_partition_by_id(partition_id);

    spm_assert_signal(p_pt, PSA_DOORBELL);
}

void tfm_spm_partition_psa_clear(void)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->signals_asserted & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~PSA_DOORBELL;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

void tfm_spm_partition_psa_eoi(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info = NULL;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (!irq_info) {
        tfm_core_panic();
    }

    if (irq_info->flih_func) {
        /* This API is for SLIH IRQs only */
        tfm_core_panic();
    }

    /* It is a fatal error if the passed signal is not currently asserted */
    if ((partition->signals_asserted & irq_signal) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);

    tfm_hal_irq_clear_pending(irq_info->source);
    tfm_hal_irq_enable(irq_info->source);
}
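
/*
 * Note (descriptive, derived from this file): tfm_spm_partition_psa_eoi()
 * above completes Second-Level Interrupt Handling (SLIH) signals and
 * re-enables the interrupt at the HAL, while
 * tfm_spm_partition_psa_reset_signal() below is the counterpart for
 * First-Level Interrupt Handling (FLIH) signals and only clears the asserted
 * signal bit.
 */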

void tfm_spm_partition_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * secure partition panics.
     */
    tfm_hal_system_reset();
}

void tfm_spm_partition_irq_enable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_enable(irq_info->source);
}

psa_irq_status_t tfm_spm_partition_irq_disable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_disable(irq_info->source);

    return 1;
}

void tfm_spm_partition_psa_reset_signal(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info;
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    if (!irq_info->flih_func) {
        /* This API is for FLIH IRQs only */
        tfm_core_panic();
    }

    if ((partition->signals_asserted & irq_signal) == 0) {
        /* The signal is not asserted */
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

#if PSA_FRAMEWORK_HAS_MM_IOVEC

const void *tfm_spm_partition_psa_map_invec(psa_handle_t msg_handle,
                                            uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                   partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the input vector has length zero. */
    if (msg->msg.in_size[invec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been accessed
     * using psa_read() or psa_skip().
     */
    if (IOVEC_IS_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the memory reference for the input vector is
     * invalid or not readable.
     */
    if (tfm_memory_check(msg->invec[invec_idx].base, msg->invec[invec_idx].len,
        false, TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    SET_IOVEC_MAPPED(msg, (invec_idx + INVEC_IDX_BASE));

    return msg->invec[invec_idx].base;
}
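
/*
 * Typical RoT Service usage of the MM-IOVEC mapping APIs backed by this block
 * (an illustrative sketch only, written against the standard client-facing
 * names; the parsing step is hypothetical):
 *
 *     const uint8_t *in = psa_map_invec(msg.handle, 0);
 *     ... parse msg.in_size[0] bytes directly from 'in' ...
 *     psa_unmap_invec(msg.handle, 0);
 *
 * Mapping avoids the copies performed by psa_read() and psa_write(), at the
 * cost of the extra bookkeeping tracked in iovec_status.
 */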

void tfm_spm_partition_psa_unmap_invec(psa_handle_t msg_handle,
                                       uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has not been mapped by a call
     * to psa_map_invec().
     */
    if (!IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been unmapped by a
     * call to psa_unmap_invec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE));
}

void *tfm_spm_partition_psa_map_outvec(psa_handle_t msg_handle,
                                       uint32_t outvec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                   partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the output vector has length zero. */
    if (msg->msg.out_size[outvec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been accessed
     * using psa_write().
     */
    if (IOVEC_IS_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector is invalid or not read-write.
     */
    if (tfm_memory_check(msg->outvec[outvec_idx].base,
                         msg->outvec[outvec_idx].len, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }
    SET_IOVEC_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    return msg->outvec[outvec_idx].base;
}

void tfm_spm_partition_psa_unmap_outvec(psa_handle_t msg_handle,
                                        uint32_t outvec_idx, size_t len)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if len is greater than the output vector size.
     */
    if (len > msg->msg.out_size[outvec_idx]) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has not been mapped by a call
     * to psa_map_outvec().
     */
    if (!IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been unmapped by a
     * call to psa_unmap_outvec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len = len;
}

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */