/*
 * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "bitops.h"
#include "critical_section.h"
#include "psa/lifecycle.h"
#include "psa/service.h"
#include "interrupt.h"
#include "spm_ipc.h"
#include "tfm_arch.h"
#include "tfm_core_utils.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/interrupt_defs.h"
#include "ffm/psa_api.h"
#include "utilities.h"
#include "ffm/backend.h"
#include "ffm/spm_error_base.h"
#include "tfm_rpc.h"
#include "tfm_spm_hal.h"
#include "tfm_hal_interrupt.h"
#include "tfm_hal_platform.h"
#include "tfm_psa_call_pack.h"

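/*
 * Stateless services are referenced through an SPM-maintained table, indexed
 * by the index field encoded in a static (stateless) handle.
 */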
#define GET_STATELESS_SERVICE(index)    (stateless_services_ref_tbl[index])
extern struct service_t *stateless_services_ref_tbl[];

#if PSA_FRAMEWORK_HAS_MM_IOVEC

/*
 * The MM-IOVEC status
 * The maximum total number of invecs and outvecs is 8.
 * Each invec/outvec takes 4 bits, 32 bits in total.
 *
 * The encoding format of the MM-IOVEC status:
 *--------------------------------------------------------------
 *| Bit    | 31 - 28   | 27 - 24   | ... | 7 - 4    | 3 - 0    |
 *--------------------------------------------------------------
 *| Vector | outvec[3] | outvec[2] | ... | invec[1] | invec[0] |
 *--------------------------------------------------------------
 *
 * Take invec[0] as an example:
 *
 * bit 0: whether invec[0] has been mapped.
 * bit 1: whether invec[0] has been unmapped.
 * bit 2: whether invec[0] has been accessed using psa_read(), psa_skip() or
 *        psa_write().
 * bit 3: reserved for invec[0].
 */
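/*
 * For example, if invec[0] has been mapped and invec[1] has been accessed
 * with psa_read(), iovec_status reads 0x41: bit 0 (invec[0] mapped) and
 * bit 6 (invec[1] accessed) are set.
 */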

#define IOVEC_STATUS_BITS       4   /* Each vector occupies 4 bits. */
#define OUTVEC_IDX_BASE         4   /*
                                     * Base index of outvec.
                                     * There are four invecs in front of
                                     * outvec.
                                     */
#define INVEC_IDX_BASE          0   /* Base index of invec. */

#define IOVEC_MAPPED_BIT        (1U << 0)
#define IOVEC_UNMAPPED_BIT      (1U << 1)
#define IOVEC_ACCESSED_BIT      (1U << 2)

#define IOVEC_IS_MAPPED(msg, iovec_idx)                                   \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &       \
     IOVEC_MAPPED_BIT)
#define IOVEC_IS_UNMAPPED(msg, iovec_idx)                                 \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &       \
     IOVEC_UNMAPPED_BIT)
#define IOVEC_IS_ACCESSED(msg, iovec_idx)                                 \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) &       \
     IOVEC_ACCESSED_BIT)
#define SET_IOVEC_MAPPED(msg, iovec_idx)                                  \
    (((msg)->iovec_status) |= (IOVEC_MAPPED_BIT <<                        \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_UNMAPPED(msg, iovec_idx)                                \
    (((msg)->iovec_status) |= (IOVEC_UNMAPPED_BIT <<                      \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_ACCESSED(msg, iovec_idx)                                \
    (((msg)->iovec_status) |= (IOVEC_ACCESSED_BIT <<                      \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */

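/*
 * Panic a Secure Partition caller that raises a PROGRAMMER ERROR status;
 * non-secure callers have the status returned to them instead.
 */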
void spm_handle_programmer_errors(psa_status_t status)
{
    if (status == PSA_ERROR_PROGRAMMER_ERROR ||
        status == PSA_ERROR_CONNECTION_REFUSED ||
        status == PSA_ERROR_CONNECTION_BUSY) {
        if (!tfm_spm_is_ns_caller()) {
            tfm_core_panic();
        }
    }
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: Lifecycle state reporting is not implemented yet, so return
     * PSA_LIFECYCLE_UNKNOWN to the caller directly for now.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/* PSA Client API function bodies */

uint32_t tfm_spm_client_psa_framework_version(void)
{
    return PSA_FRAMEWORK_VERSION;
}

uint32_t tfm_spm_client_psa_version(uint32_t sid)
{
    struct service_t *service;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It should return PSA_VERSION_NONE if the RoT Service is not
     * implemented.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_VERSION_NONE;
    }

    /*
     * It should return PSA_VERSION_NONE if the caller is not authorized
     * to access the RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_VERSION_NONE;
    }

    return service->p_ldinf->version;
}

psa_status_t tfm_spm_client_psa_call(psa_handle_t handle,
                                     uint32_t ctrl_param,
                                     const psa_invec *inptr,
                                     psa_outvec *outptr)
{
    psa_invec invecs[PSA_MAX_IOVEC];
    psa_outvec outvecs[PSA_MAX_IOVEC];
    struct tfm_conn_handle_t *conn_handle;
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    int i, j;
    int32_t client_id;
    uint32_t sid, version, index;
    uint32_t privileged;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    bool ns_caller = tfm_spm_is_ns_caller();
    int32_t type = (int32_t)(int16_t)((ctrl_param & TYPE_MASK) >> TYPE_OFFSET);
    size_t in_num = (size_t)((ctrl_param & IN_LEN_MASK) >> IN_LEN_OFFSET);
    size_t out_num = (size_t)((ctrl_param & OUT_LEN_MASK) >> OUT_LEN_OFFSET);
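    /*
     * ctrl_param packs the request type and the numbers of input and output
     * vectors into a single word; the masks and offsets used above are the
     * ones pulled in via tfm_psa_call_pack.h.
     */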

    /* The request type must be zero or positive. */
    if (type < 0) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* It is a PROGRAMMER ERROR if in_len + out_len > PSA_MAX_IOVEC. */
    if ((in_num > PSA_MAX_IOVEC) ||
        (out_num > PSA_MAX_IOVEC) ||
        (in_num + out_num > PSA_MAX_IOVEC)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /* Allocate space from the handle pool for a static handle. */
    if (IS_STATIC_HANDLE(handle)) {
        index = GET_INDEX_FROM_STATIC_HANDLE(handle);

        if (!IS_VALID_STATIC_HANDLE_IDX(index)) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = GET_STATELESS_SERVICE(index);
        if (!service) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        sid = service->p_ldinf->sid;

        /*
         * It is a PROGRAMMER ERROR if the caller is not authorized to access
         * the RoT Service.
         */
        if (tfm_spm_check_authorization(sid, service, ns_caller)
            != SPM_SUCCESS) {
            return PSA_ERROR_CONNECTION_REFUSED;
        }

        version = GET_VERSION_FROM_STATIC_HANDLE(handle);

        if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        CRITICAL_SECTION_ENTER(cs_assert);
        conn_handle = tfm_spm_create_conn_handle(service, client_id);
        CRITICAL_SECTION_LEAVE(cs_assert);

        if (!conn_handle) {
            return PSA_ERROR_CONNECTION_BUSY;
        }

        conn_handle->rhandle = NULL;
        handle = tfm_spm_to_user_handle(conn_handle);
    } else {
        conn_handle = tfm_spm_to_handle_instance(handle);

        /* It is a PROGRAMMER ERROR if an invalid handle was passed. */
        if (tfm_spm_validate_conn_handle(conn_handle, client_id)
            != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * It is a PROGRAMMER ERROR if the connection is currently
         * handling a request.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * Return PSA_ERROR_PROGRAMMER_ERROR immediately because the
         * connection has been terminated by the RoT Service.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = conn_handle->internal_msg.service;

        if (!service) {
            /* FixMe: Need to implement a mechanism to resolve this failure. */
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    privileged = tfm_spm_get_caller_privilege_mode();

    /*
     * Read client invecs from the wrap input vector. It is a PROGRAMMER ERROR
     * if the memory reference for the wrap input vector is invalid or not
     * readable.
     */
    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * Read client outvecs from the wrap output vector; the actual lengths are
     * updated later. It is a PROGRAMMER ERROR if the memory reference for
     * the wrap output vector is invalid or not read-write.
     */
    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    spm_memset(invecs, 0, sizeof(invecs));
    spm_memset(outvecs, 0, sizeof(outvecs));

    /* Copy the addresses out to avoid TOCTOU attacks. */
    spm_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
    spm_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));

    /*
     * For client input vectors, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not readable.
     */
    for (i = 0; i < in_num; i++) {
        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * Clients must never overlap input parameters because of the risk of a
     * double-fetch inconsistency.
     * Overflow is checked in the tfm_memory_check functions.
     */
    for (i = 0; i + 1 < in_num; i++) {
        for (j = i + 1; j < in_num; j++) {
            if (!((char *) invecs[j].base + invecs[j].len <=
                  (char *) invecs[i].base ||
                  (char *) invecs[j].base >=
                  (char *) invecs[i].base + invecs[i].len)) {
                return PSA_ERROR_PROGRAMMER_ERROR;
            }
        }
    }

    /*
     * For client output vectors, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not read-write.
     */
    for (i = 0; i < out_num; i++) {
        if (tfm_memory_check(outvecs[i].base, outvecs[i].len,
                             ns_caller, TFM_MEMORY_ACCESS_RW,
                             privileged) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * FixMe: Need to check if the message is unrecognized by the RoT
     * Service or incorrectly formatted.
     */
    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);

    tfm_spm_fill_msg(msg, service, handle, type, client_id,
                     invecs, in_num, outvecs, out_num, outptr);

    return backend_instance.messaging(service, msg);
}

/* The following PSA APIs are only needed by connection-based services */
#if CONFIG_TFM_CONNECTION_BASED_SERVICE_API == 1

psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *connect_handle;
    int32_t client_id;
    psa_handle_t handle;
    bool ns_caller = tfm_spm_is_ns_caller();
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /*
     * It is a PROGRAMMER ERROR if the RoT Service does not exist on the
     * platform.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /* It is a PROGRAMMER ERROR if connecting to a stateless service. */
    if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * It is a PROGRAMMER ERROR if the caller is not authorized to access the
     * RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /*
     * It is a PROGRAMMER ERROR if the version of the RoT Service requested is
     * not supported on the platform.
     */
    if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /*
     * Create the connection handle here so that an error code can be returned
     * to the client if creation fails.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    connect_handle = tfm_spm_create_conn_handle(service, client_id);
    CRITICAL_SECTION_LEAVE(cs_assert);
    if (!connect_handle) {
        return PSA_ERROR_CONNECTION_BUSY;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);

    handle = tfm_spm_to_user_handle(connect_handle);
    /* No input or output needed for a connect message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_CONNECT,
                     client_id, NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

psa_status_t tfm_spm_client_psa_close(psa_handle_t handle)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *conn_handle;
    int32_t client_id;
    bool ns_caller = tfm_spm_is_ns_caller();

    /* It will have no effect if called with the NULL handle */
    if (handle == PSA_NULL_HANDLE) {
        return PSA_SUCCESS;
    }

    /* It is a PROGRAMMER ERROR if called with a stateless handle. */
    if (IS_STATIC_HANDLE(handle)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    conn_handle = tfm_spm_to_handle_instance(handle);
    /*
     * It is a PROGRAMMER ERROR if an invalid handle was provided that is not
     * the null handle.
     */
    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    service = conn_handle->internal_msg.service;
    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);

    /*
     * It is a PROGRAMMER ERROR if the connection is currently handling a
     * request.
     */
    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* No input or output needed for a close message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_DISCONNECT, client_id,
                     NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

#endif /* CONFIG_TFM_CONNECTION_BASED_SERVICE_API */

/* PSA Partition API function bodies */

psa_signal_t tfm_spm_partition_psa_wait(psa_signal_t signal_mask,
                                        uint32_t timeout)
{
    struct partition_t *partition = NULL;

    /*
     * Timeout bits [30:0] are reserved (RES) for future use.
     * The SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->signals_allowed & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * thrd_wait_on() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily set in
     * the runtime context. Once new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->signals_asserted & signal_mask) == 0) {
        partition->signals_waiting = signal_mask;
        thrd_wait_on(&partition->waitobj, CURRENT_THREAD);
    }

    return partition->signals_asserted & signal_mask;
}

psa_status_t tfm_spm_partition_psa_get(psa_signal_t signal, psa_msg_t *msg)
{
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct partition_t *partition = NULL;
    uint32_t privileged;

    /*
     * Only one message can be retrieved by each psa_get() call. It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = tfm_spm_partition_get_privileged_mode(
                                                partition->p_ldinf->flags);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service
     * signal is returned by psa_wait().
     */
    if (partition->signals_asserted == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->signals_asserted & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the message from the partition by signal. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * Service.
     */
    tmp_msg = tfm_spm_get_msg_by_signal(partition, signal);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TO_CONTAINER(tmp_msg,
                  struct tfm_conn_handle_t,
                  internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    spm_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    return PSA_SUCCESS;
}

size_t tfm_spm_partition_psa_read(psa_handle_t msg_handle, uint32_t invec_idx,
                                  void *buffer, size_t num_bytes)
{
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no data remaining in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
            msg->msg.in_size[invec_idx] : num_bytes;

    spm_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some data remaining */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}

size_t tfm_spm_partition_psa_skip(psa_handle_t msg_handle, uint32_t invec_idx,
                                  size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no data remaining in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * If num_bytes is greater than the remaining size of the input vector then
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some data remaining */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_partition_psa_write(psa_handle_t msg_handle, uint32_t outvec_idx,
                                 const void *buffer, size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                partition->p_ldinf->flags);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
        msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE));
#endif

    /*
     * Copy the service buffer to the client outvec. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    spm_memcpy((char *)msg->outvec[outvec_idx].base +
               msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}

int32_t tfm_spm_partition_psa_reply(psa_handle_t msg_handle,
                                    psa_status_t status)
{
    struct service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * The RoT Service information needed by this function is stored in the
     * message body structure, since only two parameters are passed in:
     * handle and status.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are passed to this function: CONNECT, REQUEST
     * and DISCONNECT. Each type needs to be processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to a PSA_IPC_CONNECT message. The connect handle is returned
         * if the input status is PSA_SUCCESS. Other return values are based
         * on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* The service handle is not used any more */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {

#if PSA_FRAMEWORK_HAS_MM_IOVEC

            /*
             * If the unmap function is not called for an input/output vector
             * that has been mapped, the framework removes the mapping.
             */
            int i;

            for (i = 0; i < PSA_MAX_IOVEC * 2; i++) {
                if (IOVEC_IS_MAPPED(msg, i) && (!IOVEC_IS_UNMAPPED(msg, i))) {
                    SET_IOVEC_UNMAPPED(msg, i);
                    /*
                     * Any output vectors that are still mapped will report
                     * that zero bytes have been written.
                     */
                    if (i >= OUTVEC_IDX_BASE) {
                        msg->outvec[i - OUTVEC_IDX_BASE].len = 0;
                    }
                }
            }

#endif
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
            if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
                tfm_spm_free_conn_handle(service, conn_handle);
            }
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the SPM
         * must panic the Secure Partition in response to a PROGRAMMER ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    /*
     * TODO: This can be optimized further by moving the critical section
     * protection to the mailbox. The implementation also needs to be checked
     * when a secure context is involved.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    ret = backend_instance.replying(msg, ret);
    CRITICAL_SECTION_LEAVE(cs_assert);

    return ret;
}

void tfm_spm_partition_psa_notify(int32_t partition_id)
{
    struct partition_t *p_pt = tfm_spm_get_partition_by_id(partition_id);

    spm_assert_signal(p_pt, PSA_DOORBELL);
}

void tfm_spm_partition_psa_clear(void)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->signals_asserted & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~PSA_DOORBELL;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

void tfm_spm_partition_psa_eoi(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info = NULL;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (!irq_info) {
        tfm_core_panic();
    }

    if (irq_info->flih_func) {
        /* This API is for SLIH IRQs only */
        tfm_core_panic();
    }

    /* It is a fatal error if the passed signal is not currently asserted */
    if ((partition->signals_asserted & irq_signal) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);

    tfm_hal_irq_clear_pending(irq_info->source);
    tfm_hal_irq_enable(irq_info->source);
}

void tfm_spm_partition_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * secure partition panics.
     */
    tfm_hal_system_reset();
}

void tfm_spm_partition_psa_irq_enable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_enable(irq_info->source);
}

psa_irq_status_t tfm_spm_partition_psa_irq_disable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_disable(irq_info->source);

    return 1;
}

void tfm_spm_partition_psa_reset_signal(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info;
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    if (!irq_info->flih_func) {
        /* This API is for FLIH IRQs only */
        tfm_core_panic();
    }

    if ((partition->signals_asserted & irq_signal) == 0) {
        /* The signal is not asserted */
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

/* psa_set_rhandle is only needed by connection-based services */
#if CONFIG_TFM_CONNECTION_BASED_SERVICE_API == 1

void tfm_spm_partition_psa_set_rhandle(psa_handle_t msg_handle, void *rhandle)
{
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /* It is a PROGRAMMER ERROR if a stateless service sets rhandle. */
    if (SERVICE_IS_STATELESS(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store reverse handle for following client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

#endif /* CONFIG_TFM_CONNECTION_BASED_SERVICE_API */

#if PSA_FRAMEWORK_HAS_MM_IOVEC

const void *tfm_spm_partition_psa_map_invec(psa_handle_t msg_handle,
                                            uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the input vector has length zero. */
    if (msg->msg.in_size[invec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been accessed
     * using psa_read() or psa_skip().
     */
    if (IOVEC_IS_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the memory reference for the input vector is
     * invalid or not readable.
     */
    if (tfm_memory_check(msg->invec[invec_idx].base, msg->invec[invec_idx].len,
                         false, TFM_MEMORY_ACCESS_RO,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    SET_IOVEC_MAPPED(msg, (invec_idx + INVEC_IDX_BASE));

    return msg->invec[invec_idx].base;
}

void tfm_spm_partition_psa_unmap_invec(psa_handle_t msg_handle,
                                       uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has not been mapped by a call
     * to psa_map_invec().
     */
    if (!IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been unmapped by a
     * call to psa_unmap_invec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE));
}

void *tfm_spm_partition_psa_map_outvec(psa_handle_t msg_handle,
                                       uint32_t outvec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = tfm_spm_partition_get_privileged_mode(
                                                partition->p_ldinf->flags);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the output vector has length zero. */
    if (msg->msg.out_size[outvec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been accessed
     * using psa_write().
     */
    if (IOVEC_IS_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector is invalid or not read-write.
     */
    if (tfm_memory_check(msg->outvec[outvec_idx].base,
                         msg->outvec[outvec_idx].len, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }
    SET_IOVEC_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    return msg->outvec[outvec_idx].base;
}

void tfm_spm_partition_psa_unmap_outvec(psa_handle_t msg_handle,
                                        uint32_t outvec_idx, size_t len)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if len is greater than the output vector size.
     */
    if (len > msg->msg.out_size[outvec_idx]) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has not been mapped by a call
     * to psa_map_outvec().
     */
    if (!IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been unmapped by a
     * call to psa_unmap_outvec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len = len;
}

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */