/*
 * Copyright (c) 2019-2021, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "bitops.h"
#include "critical_section.h"
#include "psa/lifecycle.h"
#include "psa/service.h"
#include "spm_ipc.h"
#include "tfm_arch.h"
#include "tfm_core_utils.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/interrupt_defs.h"
#include "utilities.h"
#include "ffm/backend.h"
#include "ffm/psa_api.h"
#include "ffm/spm_error_base.h"
#include "tfm_rpc.h"
#include "tfm_spm_hal.h"
#include "tfm_hal_interrupt.h"
#include "tfm_hal_platform.h"
#include "tfm_psa_call_pack.h"

#define GET_STATELESS_SERVICE(index)    (stateless_services_ref_tbl[index])
extern struct service_t *stateless_services_ref_tbl[];

#if PSA_FRAMEWORK_HAS_MM_IOVEC

/*
 * The MM-IOVEC status
 * The maximum total number of invecs and outvecs is 8.
 * Each invec/outvec takes 4 bits, 32 bits in total.
 *
 * The encoding format of the MM-IOVEC status:
 *--------------------------------------------------------------
 *| Bit    | 31 - 28   | 27 - 24   | ... |  7 - 4   |  3 - 0   |
 *--------------------------------------------------------------
 *| Vector | outvec[3] | outvec[2] | ... | invec[1] | invec[0] |
 *--------------------------------------------------------------
 *
 * Take invec[0] as an example:
 *
 * bit 0: whether invec[0] has been mapped.
 * bit 1: whether invec[0] has been unmapped.
 * bit 2: whether invec[0] has been accessed using psa_read(), psa_skip() or
 *        psa_write().
 * bit 3: reserved for invec[0].
 */

#define IOVEC_STATUS_BITS        4    /* Each vector occupies 4 bits. */
#define OUTVEC_IDX_BASE          4    /*
                                       * Base index of outvec.
                                       * There are four invecs in front of
                                       * outvec.
                                       */
#define INVEC_IDX_BASE           0    /* Base index of invec. */

#define IOVEC_MAPPED_BIT         (1U << 0)
#define IOVEC_UNMAPPED_BIT       (1U << 1)
#define IOVEC_ACCESSED_BIT       (1U << 2)

#define IOVEC_IS_MAPPED(msg, iovec_idx)      \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_MAPPED_BIT)
#define IOVEC_IS_UNMAPPED(msg, iovec_idx)    \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_UNMAPPED_BIT)
#define IOVEC_IS_ACCESSED(msg, iovec_idx)    \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_ACCESSED_BIT)
#define SET_IOVEC_MAPPED(msg, iovec_idx)     \
    (((msg)->iovec_status) |= (IOVEC_MAPPED_BIT << \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_UNMAPPED(msg, iovec_idx)   \
    (((msg)->iovec_status) |= (IOVEC_UNMAPPED_BIT << \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_ACCESSED(msg, iovec_idx)   \
    (((msg)->iovec_status) |= (IOVEC_ACCESSED_BIT << \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */

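/*
 * Illustrative worked example of the encoding above (documentation only, not
 * part of the SPM logic). Assuming iovec_status starts at 0:
 *
 *     SET_IOVEC_MAPPED(msg, 1)    -> status |= 0x1 << 4   -> 0x00000010
 *     SET_IOVEC_UNMAPPED(msg, 1)  -> status |= 0x2 << 4   -> 0x00000030
 *     SET_IOVEC_ACCESSED(msg, 4)  -> status |= 0x4 << 16  -> 0x00040030
 *
 * i.e. invec[1] has been mapped and then unmapped, and outvec[0] (iovec index
 * OUTVEC_IDX_BASE + 0 == 4) has been accessed with psa_write().
 */
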
uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: return PSA_LIFECYCLE_UNKNOWN to the caller directly. It will be
     * implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/* PSA Client API function body */

uint32_t tfm_spm_client_psa_framework_version(void)
{
    return PSA_FRAMEWORK_VERSION;
}

uint32_t tfm_spm_client_psa_version(uint32_t sid)
{
    struct service_t *service;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It should return PSA_VERSION_NONE if the RoT Service is not
     * implemented.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_VERSION_NONE;
    }

    /*
     * It should return PSA_VERSION_NONE if the caller is not authorized
     * to access the RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_VERSION_NONE;
    }

    return service->p_ldinf->version;
}

psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *connect_handle;
    int32_t client_id;
    psa_handle_t handle;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It is a PROGRAMMER ERROR if the RoT Service does not exist on the
     * platform.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    /* It is a PROGRAMMER ERROR if connecting to a stateless service. */
    if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /*
     * It is a PROGRAMMER ERROR if the caller is not authorized to access the
     * RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    /*
     * It is a PROGRAMMER ERROR if the version of the RoT Service requested is
     * not supported on the platform.
     */
    if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /*
     * Create the connection handle here so the error code can be returned to
     * the client if creation fails.
     */
    connect_handle = tfm_spm_create_conn_handle(service, client_id);
    if (!connect_handle) {
        return PSA_ERROR_CONNECTION_BUSY;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);
    if (!msg) {
        /* Not enough resources to create the message */
        return PSA_ERROR_CONNECTION_BUSY;
    }

    handle = tfm_spm_to_user_handle(connect_handle);
    /* No input or output needed for connect message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_CONNECT,
                     client_id, NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_call(psa_handle_t handle,
                                     uint32_t ctrl_param,
                                     const psa_invec *inptr,
                                     psa_outvec *outptr)
{
    psa_invec invecs[PSA_MAX_IOVEC];
    psa_outvec outvecs[PSA_MAX_IOVEC];
    struct tfm_conn_handle_t *conn_handle;
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    int i, j;
    int32_t client_id;
    uint32_t sid, version, index;
    uint32_t privileged;
    bool ns_caller = tfm_spm_is_ns_caller();
    int32_t type = (int32_t)(int16_t)((ctrl_param & TYPE_MASK) >> TYPE_OFFSET);
    size_t in_num = (size_t)((ctrl_param & IN_LEN_MASK) >> IN_LEN_OFFSET);
    size_t out_num = (size_t)((ctrl_param & OUT_LEN_MASK) >> OUT_LEN_OFFSET);

    /* The request type must be zero or positive. */
    if (type < 0) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /* It is a PROGRAMMER ERROR if in_len + out_len > PSA_MAX_IOVEC. */
    if ((in_num > PSA_MAX_IOVEC) ||
        (out_num > PSA_MAX_IOVEC) ||
        (in_num + out_num > PSA_MAX_IOVEC)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /* Allocate space from handle pool for static handle. */
    if (IS_STATIC_HANDLE(handle)) {
        index = GET_INDEX_FROM_STATIC_HANDLE(handle);

        if (!IS_VALID_STATIC_HANDLE_IDX(index)) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        service = GET_STATELESS_SERVICE(index);
        if (!service) {
            tfm_core_panic();
        }

        sid = service->p_ldinf->sid;

        /*
         * It is a PROGRAMMER ERROR if the caller is not authorized to access
         * the RoT Service.
         */
        if (tfm_spm_check_authorization(sid, service, ns_caller)
            != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_REFUSED);
        }

        version = GET_VERSION_FROM_STATIC_HANDLE(handle);

        if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        conn_handle = tfm_spm_create_conn_handle(service, client_id);

        if (!conn_handle) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_CONNECTION_BUSY);
        }

        conn_handle->rhandle = NULL;
        handle = tfm_spm_to_user_handle(conn_handle);
    } else {
        conn_handle = tfm_spm_to_handle_instance(handle);

        /* It is a PROGRAMMER ERROR if an invalid handle was passed. */
        if (tfm_spm_validate_conn_handle(conn_handle, client_id)
            != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        /*
         * It is a PROGRAMMER ERROR if the connection is currently
         * handling a request.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }

        /*
         * Return PSA_ERROR_PROGRAMMER_ERROR immediately if the connection
         * has been terminated by the RoT Service.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = conn_handle->service;
    }

    if (!service) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        tfm_core_panic();
    }

    privileged = tfm_spm_get_caller_privilege_mode();

    /*
     * Read client invecs from the wrap input vector. It is a PROGRAMMER ERROR
     * if the memory reference for the wrap input vector is invalid or not
     * readable.
     */
    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    /*
     * Read client outvecs from the wrap output vector; the actual length is
     * updated later. It is a PROGRAMMER ERROR if the memory reference for
     * the wrap output vector is invalid or not read-write.
     */
    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    spm_memset(invecs, 0, sizeof(invecs));
    spm_memset(outvecs, 0, sizeof(outvecs));

    /* Copy the address out to avoid TOCTOU attacks. */
    spm_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
    spm_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));

    /*
     * For each client input vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference is invalid or not readable.
     */
    for (i = 0; i < in_num; i++) {
        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }
    }

    /*
     * Clients must never overlap input parameters because of the risk of a
     * double-fetch inconsistency.
     * Overflow is checked in tfm_memory_check functions.
     */
    for (i = 0; i + 1 < in_num; i++) {
        for (j = i + 1; j < in_num; j++) {
            if (!((char *) invecs[j].base + invecs[j].len <=
                  (char *) invecs[i].base ||
                  (char *) invecs[j].base >=
                  (char *) invecs[i].base + invecs[i].len)) {
                TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
            }
        }
    }

    /*
     * For each client output vector, it is a PROGRAMMER ERROR if the provided
     * payload memory reference is invalid or not read-write.
     */
    for (i = 0; i < out_num; i++) {
        if (tfm_memory_check(outvecs[i].base, outvecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
            TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
        }
    }

    /*
     * FixMe: Need to check if the message is unrecognized by the RoT
     * Service or incorrectly formatted.
     */
    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        TFM_PROGRAMMER_ERROR(ns_caller, PSA_ERROR_PROGRAMMER_ERROR);
    }

    tfm_spm_fill_msg(msg, service, handle, type, client_id,
                     invecs, in_num, outvecs, out_num, outptr);

    return backend_instance.messaging(service, msg);
}

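/*
 * Illustrative note (documentation only): ctrl_param above is the packed form
 * of the psa_call() parameters produced on the caller side with the masks and
 * offsets defined in tfm_psa_call_pack.h (type in the upper half-word, in_len
 * and out_len in the low bytes is the assumed layout). A client call such as
 *
 *     psa_call(handle, PSA_IPC_CALL, in_vec, 2, out_vec, 1);
 *
 * therefore arrives here with type == PSA_IPC_CALL (0), in_num == 2 and
 * out_num == 1 after the masks and shifts at the top of
 * tfm_spm_client_psa_call().
 */
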
void tfm_spm_client_psa_close(psa_handle_t handle)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *conn_handle;
    int32_t client_id;
    bool ns_caller = tfm_spm_is_ns_caller();

    /* It will have no effect if called with the NULL handle */
    if (handle == PSA_NULL_HANDLE) {
        return;
    }

    /* It is a PROGRAMMER ERROR if called with a stateless handle. */
    if (IS_STATIC_HANDLE(handle)) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    conn_handle = tfm_spm_to_handle_instance(handle);
    /*
     * It is a PROGRAMMER ERROR if an invalid handle was provided that is not
     * the null handle.
     */
    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != SPM_SUCCESS) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    service = conn_handle->service;
    if (!service) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        tfm_core_panic();
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);
    if (!msg) {
        /* FixMe: Need to implement one mechanism to resolve this failure. */
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the connection is currently handling a
     * request.
     */
    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
        TFM_PROGRAMMER_ERROR(ns_caller, PROGRAMMER_ERROR_NULL);
    }

    /* No input or output needed for close message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_DISCONNECT, client_id,
                     NULL, 0, NULL, 0, NULL);

    (void)backend_instance.messaging(service, msg);
}

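/*
 * Illustrative client-side view of the three calls above (documentation only;
 * EXAMPLE_SID, the version and the request layout are hypothetical):
 *
 *     psa_invec  in_vec[]  = { { &req,  sizeof(req)  } };
 *     psa_outvec out_vec[] = { { &resp, sizeof(resp) } };
 *     psa_handle_t h = psa_connect(EXAMPLE_SID, 1);
 *
 *     if (h > 0) {
 *         (void)psa_call(h, PSA_IPC_CALL, in_vec, 1, out_vec, 1);
 *         psa_close(h);
 *     }
 *
 * For a stateless service the client skips psa_connect()/psa_close() and
 * passes the manifest-generated static handle straight to psa_call(), which
 * takes the IS_STATIC_HANDLE() path in tfm_spm_client_psa_call() above.
 */
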
/* PSA Partition API function body */

psa_signal_t tfm_spm_partition_psa_wait(psa_signal_t signal_mask,
                                        uint32_t timeout)
{
    struct partition_t *partition = NULL;

    /*
     * Timeout[30:0] are reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->signals_allowed & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * thrd_wait_on() blocks the calling thread if no signals are available.
     * In this case, the return value of this function is temporarily stored
     * in the runtime context. Once new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->signals_asserted & signal_mask) == 0) {
        partition->signals_waiting = signal_mask;
        thrd_wait_on(&partition->waitobj,
                     &(tfm_spm_get_running_partition()->thrd));
    }

    return partition->signals_asserted & signal_mask;
}

psa_status_t tfm_spm_partition_psa_get(psa_signal_t signal, psa_msg_t *msg)
{
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct partition_t *partition = NULL;
    uint32_t privileged;

    /*
     * Only one message can be retrieved at a time by psa_get(). It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service
     * signal is returned by psa_wait().
     */
    if (partition->signals_asserted == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->signals_asserted & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the message from the partition by signal. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * Service.
     */
    tmp_msg = tfm_spm_get_msg_by_signal(partition, signal);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TO_CONTAINER(tmp_msg,
                  struct tfm_conn_handle_t,
                  internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    spm_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    return PSA_SUCCESS;
}

void tfm_spm_partition_psa_set_rhandle(psa_handle_t msg_handle, void *rhandle)
{
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /* It is a PROGRAMMER ERROR if a stateless service sets rhandle. */
    if (SERVICE_IS_STATELESS(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store reverse handle for following client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

size_t tfm_spm_partition_psa_read(psa_handle_t msg_handle, uint32_t invec_idx,
                                  void *buffer, size_t num_bytes)
{
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There was no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
                        msg->msg.in_size[invec_idx] : num_bytes;

    spm_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}

size_t tfm_spm_partition_psa_skip(psa_handle_t msg_handle, uint32_t invec_idx,
                                  size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There was no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * If num_bytes is greater than the remaining size of the input vector then
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_partition_psa_write(psa_handle_t msg_handle, uint32_t outvec_idx,
                                 const void *buffer, size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
        msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE));
#endif

    /*
     * Copy the service buffer to client outvecs. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    spm_memcpy((char *)msg->outvec[outvec_idx].base +
               msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the write number */
    msg->outvec[outvec_idx].len += num_bytes;
}

void tfm_spm_partition_psa_reply(psa_handle_t msg_handle, psa_status_t status)
{
    struct service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * RoT Service information is needed in this function; it is stored in the
     * message body structure. Only two parameters (handle and status) are
     * passed to this function, so this is a simple and useful place to keep
     * it.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are passed to this function: CONNECTION, REQUEST
     * and DISCONNECTION. Each type needs to be processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to the PSA_IPC_CONNECT message. The connection handle is
         * returned if the input status is PSA_SUCCESS. Other return values
         * are based on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* Service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {

#if PSA_FRAMEWORK_HAS_MM_IOVEC

            /*
             * If the unmap function has not been called for an input/output
             * vector that was mapped, the framework removes the mapping.
             */
            int i;

            for (i = 0; i < PSA_MAX_IOVEC * 2; i++) {
                if (IOVEC_IS_MAPPED(msg, i) && (!IOVEC_IS_UNMAPPED(msg, i))) {
                    SET_IOVEC_UNMAPPED(msg, i);
                    /*
                     * Any output vectors that are still mapped will report
                     * that zero bytes have been written.
                     */
                    if (i >= OUTVEC_IDX_BASE) {
                        msg->outvec[i - OUTVEC_IDX_BASE].len = 0;
                    }
                }
            }

#endif
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
            if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
                tfm_spm_free_conn_handle(service, conn_handle);
            }
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the SPM
         * must panic the Secure Partition in response to a PROGRAMMER ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    backend_instance.replying(msg, ret);
}

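/*
 * Illustrative RoT Service side of the message flow handled above
 * (documentation only; EXAMPLE_SIGNAL is a hypothetical manifest-generated
 * signal). Each psa_*() call below lands in the corresponding
 * tfm_spm_partition_psa_*() function in this file:
 *
 *     psa_msg_t msg;
 *
 *     while (1) {
 *         (void)psa_wait(EXAMPLE_SIGNAL, PSA_BLOCK);
 *         if (psa_get(EXAMPLE_SIGNAL, &msg) != PSA_SUCCESS) {
 *             continue;
 *         }
 *         switch (msg.type) {
 *         case PSA_IPC_CONNECT:
 *         case PSA_IPC_DISCONNECT:
 *             psa_reply(msg.handle, PSA_SUCCESS);
 *             break;
 *         default: {
 *             uint8_t req[16];
 *             size_t len = psa_read(msg.handle, 0, req, sizeof(req));
 *             psa_write(msg.handle, 0, req, len);
 *             psa_reply(msg.handle, PSA_SUCCESS);
 *             break;
 *         }
 *         }
 *     }
 */
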
void tfm_spm_partition_psa_notify(int32_t partition_id)
{
    struct partition_t *p_pt = tfm_spm_get_partition_by_id(partition_id);

    spm_assert_signal(p_pt, PSA_DOORBELL);
}

void tfm_spm_partition_psa_clear(void)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->signals_asserted & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~PSA_DOORBELL;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

void tfm_spm_partition_psa_eoi(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info = NULL;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    /* It is a fatal error if passed signal is not an interrupt signal. */
    if (!irq_info) {
        tfm_core_panic();
    }

    if (irq_info->flih_func) {
        /* This API is for SLIH IRQs only */
        psa_panic();
    }

    /* It is a fatal error if passed signal is not currently asserted */
    if ((partition->signals_asserted & irq_signal) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);

    tfm_hal_irq_clear_pending(irq_info->source);
    tfm_hal_irq_enable(irq_info->source);
}

void tfm_spm_partition_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * secure partition panics.
     */
    tfm_hal_system_reset();
}

void tfm_spm_partition_irq_enable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_enable(irq_info->source);
}

psa_irq_status_t tfm_spm_partition_irq_disable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_disable(irq_info->source);

    return 1;
}

void tfm_spm_partition_psa_reset_signal(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info;
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    if (!irq_info->flih_func) {
        /* This API is for FLIH IRQs only */
        tfm_core_panic();
    }

    if ((partition->signals_asserted & irq_signal) == 0) {
        /* The signal is not asserted */
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

#if PSA_FRAMEWORK_HAS_MM_IOVEC

const void *tfm_spm_partition_psa_map_invec(psa_handle_t msg_handle,
                                            uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the input vector has length zero. */
    if (msg->msg.in_size[invec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been accessed
     * using psa_read() or psa_skip().
     */
    if (IOVEC_IS_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the memory reference for the wrap input vector is
     * invalid or not readable.
     */
    if (tfm_memory_check(msg->invec[invec_idx].base, msg->invec[invec_idx].len,
                         false, TFM_MEMORY_ACCESS_RO,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    SET_IOVEC_MAPPED(msg, (invec_idx + INVEC_IDX_BASE));

    return msg->invec[invec_idx].base;
}

void tfm_spm_partition_psa_unmap_invec(psa_handle_t msg_handle,
                                       uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has not been mapped by a call
     * to psa_map_invec().
     */
    if (!IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been unmapped by a
     * call to psa_unmap_invec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE));
}

void *tfm_spm_partition_psa_map_outvec(psa_handle_t msg_handle,
                                       uint32_t outvec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                    partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the output vector has length zero. */
    if (msg->msg.out_size[outvec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been accessed
     * using psa_write().
     */
    if (IOVEC_IS_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector is invalid or not read-write.
     */
    if (tfm_memory_check(msg->outvec[outvec_idx].base,
                         msg->outvec[outvec_idx].len, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }
    SET_IOVEC_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    return msg->outvec[outvec_idx].base;
}

void tfm_spm_partition_psa_unmap_outvec(psa_handle_t msg_handle,
                                        uint32_t outvec_idx, size_t len)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if len is greater than the output vector size.
     */
    if (len > msg->msg.out_size[outvec_idx]) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has not been mapped by a call
     * to psa_map_outvec().
     */
    if (!IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been unmapped by a
     * call to psa_unmap_outvec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    /* Update the write number */
    msg->outvec[outvec_idx].len = len;
}

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */
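
/*
 * Illustrative RoT Service usage of the MM-IOVEC API implemented above
 * (documentation only; fill_response() is a hypothetical service routine):
 *
 *     uint8_t *out = psa_map_outvec(msg.handle, 0);
 *     size_t produced = fill_response(out, msg.out_size[0]);
 *     psa_unmap_outvec(msg.handle, 0, produced);
 *     psa_reply(msg.handle, PSA_SUCCESS);
 *
 * Mapping avoids the intermediate copy performed by psa_write(), at the cost
 * of exposing the client buffer to the service, which is why it must be
 * enabled per-service (see the SERVICE_ENABLED_MM_IOVEC() checks above).
 */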