/*
 * Copyright (c) 2019-2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <stdint.h>
#include "bitops.h"
#include "critical_section.h"
#include "psa/lifecycle.h"
#include "psa/service.h"
#include "interrupt.h"
#include "spm_ipc.h"
#include "tfm_arch.h"
#include "tfm_core_utils.h"
#include "load/partition_defs.h"
#include "load/service_defs.h"
#include "load/interrupt_defs.h"
#include "ffm/psa_api.h"
#include "utilities.h"
#include "ffm/backend.h"
#include "ffm/spm_error_base.h"
#include "tfm_rpc.h"
#include "tfm_spm_hal.h"
#include "tfm_hal_interrupt.h"
#include "tfm_hal_platform.h"
#include "tfm_psa_call_pack.h"

#define GET_STATELESS_SERVICE(index)    (stateless_services_ref_tbl[index])
extern struct service_t *stateless_services_ref_tbl[];

#if PSA_FRAMEWORK_HAS_MM_IOVEC

/*
 * The MM-IOVEC status
 * The maximum total number of invecs and outvecs is 8.
 * Each invec/outvec takes 4 bits, 32 bits in total.
 *
 * The encoding format of the MM-IOVEC status:
 *--------------------------------------------------------------
 *| Bit    | 31 - 28   | 27 - 24   | ... | 7 - 4    | 3 - 0    |
 *--------------------------------------------------------------
 *| Vector | outvec[3] | outvec[2] | ... | invec[1] | invec[0] |
 *--------------------------------------------------------------
 *
 * Take invec[0] as an example:
 *
 * bit 0: whether invec[0] has been mapped.
 * bit 1: whether invec[0] has been unmapped.
 * bit 2: whether invec[0] has been accessed using psa_read(), psa_skip() or
 *        psa_write().
 * bit 3: reserved for invec[0].
 */

#define IOVEC_STATUS_BITS       4    /* Each vector occupies 4 bits. */
#define OUTVEC_IDX_BASE         4    /*
                                      * Base index of outvec.
                                      * There are four invecs in front of
                                      * outvec.
                                      */
#define INVEC_IDX_BASE          0    /* Base index of invec. */

#define IOVEC_MAPPED_BIT        (1U << 0)
#define IOVEC_UNMAPPED_BIT      (1U << 1)
#define IOVEC_ACCESSED_BIT      (1U << 2)

#define IOVEC_IS_MAPPED(msg, iovec_idx)      \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_MAPPED_BIT)
#define IOVEC_IS_UNMAPPED(msg, iovec_idx)    \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_UNMAPPED_BIT)
#define IOVEC_IS_ACCESSED(msg, iovec_idx)    \
    ((((msg)->iovec_status) >> ((iovec_idx) * IOVEC_STATUS_BITS)) & \
     IOVEC_ACCESSED_BIT)
#define SET_IOVEC_MAPPED(msg, iovec_idx)     \
    (((msg)->iovec_status) |= (IOVEC_MAPPED_BIT << \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_UNMAPPED(msg, iovec_idx)   \
    (((msg)->iovec_status) |= (IOVEC_UNMAPPED_BIT << \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))
#define SET_IOVEC_ACCESSED(msg, iovec_idx)   \
    (((msg)->iovec_status) |= (IOVEC_ACCESSED_BIT << \
                               ((iovec_idx) * IOVEC_STATUS_BITS)))

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */

void spm_handle_programmer_errors(psa_status_t status)
{
    if (status == PSA_ERROR_PROGRAMMER_ERROR ||
        status == PSA_ERROR_CONNECTION_REFUSED ||
        status == PSA_ERROR_CONNECTION_BUSY) {
        if (!tfm_spm_is_ns_caller()) {
            tfm_core_panic();
        }
    }
}

uint32_t tfm_spm_get_lifecycle_state(void)
{
    /*
     * FixMe: Return PSA_LIFECYCLE_UNKNOWN to the caller directly for now.
     * Lifecycle state reporting will be implemented in the future.
     */
    return PSA_LIFECYCLE_UNKNOWN;
}

/* PSA Client API function body */

uint32_t tfm_spm_client_psa_framework_version(void)
{
    return PSA_FRAMEWORK_VERSION;
}

uint32_t tfm_spm_client_psa_version(uint32_t sid)
{
    struct service_t *service;
    bool ns_caller = tfm_spm_is_ns_caller();

    /*
     * It should return PSA_VERSION_NONE if the RoT Service is not
     * implemented.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_VERSION_NONE;
    }

    /*
     * It should return PSA_VERSION_NONE if the caller is not authorized
     * to access the RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_VERSION_NONE;
    }

    return service->p_ldinf->version;
}

psa_status_t tfm_spm_client_psa_call(psa_handle_t handle,
                                     uint32_t ctrl_param,
                                     const psa_invec *inptr,
                                     psa_outvec *outptr)
{
    psa_invec invecs[PSA_MAX_IOVEC];
    psa_outvec outvecs[PSA_MAX_IOVEC];
    struct tfm_conn_handle_t *conn_handle;
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    int i, j;
    int32_t client_id;
    uint32_t sid, version, index;
    uint32_t privileged;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    bool ns_caller = tfm_spm_is_ns_caller();
    int32_t type = (int32_t)(int16_t)((ctrl_param & TYPE_MASK) >> TYPE_OFFSET);
    size_t in_num = (size_t)((ctrl_param & IN_LEN_MASK) >> IN_LEN_OFFSET);
    size_t out_num = (size_t)((ctrl_param & OUT_LEN_MASK) >> OUT_LEN_OFFSET);

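    /*
     * Note: ctrl_param is the packed control word built on the client side
     * (see tfm_psa_call_pack.h). As an illustrative sketch only, the
     * client-side packing is roughly:
     *
     *     ctrl_param = ((uint32_t)type << TYPE_OFFSET) |
     *                  ((uint32_t)in_len << IN_LEN_OFFSET) |
     *                  ((uint32_t)out_len << OUT_LEN_OFFSET);
     *
     * so the three shifts above simply reverse that encoding.
     */
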
    /* The request type must be zero or positive. */
    if (type < 0) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* It is a PROGRAMMER ERROR if in_len + out_len > PSA_MAX_IOVEC. */
    if ((in_num > PSA_MAX_IOVEC) ||
        (out_num > PSA_MAX_IOVEC) ||
        (in_num + out_num > PSA_MAX_IOVEC)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /* Allocate space from the handle pool for a static handle. */
    if (IS_STATIC_HANDLE(handle)) {
        index = GET_INDEX_FROM_STATIC_HANDLE(handle);

        if (!IS_VALID_STATIC_HANDLE_IDX(index)) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = GET_STATELESS_SERVICE(index);
        if (!service) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        sid = service->p_ldinf->sid;

        /*
         * It is a PROGRAMMER ERROR if the caller is not authorized to access
         * the RoT Service.
         */
        if (tfm_spm_check_authorization(sid, service, ns_caller)
            != SPM_SUCCESS) {
            return PSA_ERROR_CONNECTION_REFUSED;
        }

        version = GET_VERSION_FROM_STATIC_HANDLE(handle);

        if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        CRITICAL_SECTION_ENTER(cs_assert);
        conn_handle = tfm_spm_create_conn_handle(service, client_id);
        CRITICAL_SECTION_LEAVE(cs_assert);

        if (!conn_handle) {
            return PSA_ERROR_CONNECTION_BUSY;
        }

        conn_handle->rhandle = NULL;
        handle = tfm_spm_to_user_handle(conn_handle);
    } else {
        conn_handle = tfm_spm_to_handle_instance(handle);

        /* It is a PROGRAMMER ERROR if an invalid handle was passed. */
        if (tfm_spm_validate_conn_handle(conn_handle, client_id)
            != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * It is a PROGRAMMER ERROR if the connection is currently
         * handling a request.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        /*
         * Return PSA_ERROR_PROGRAMMER_ERROR immediately if the connection
         * has been terminated by the RoT Service.
         */
        if (conn_handle->status == TFM_HANDLE_STATUS_CONNECT_ERROR) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }

        service = conn_handle->internal_msg.service;

        if (!service) {
            /* FixMe: Need to implement a mechanism to resolve this failure. */
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    privileged = tfm_spm_get_caller_privilege_mode();

    /*
     * Read client invecs from the wrap input vector. It is a PROGRAMMER ERROR
     * if the memory reference for the wrap input vector is invalid or not
     * readable.
     */
    if (tfm_memory_check(inptr, in_num * sizeof(psa_invec), ns_caller,
                         TFM_MEMORY_ACCESS_RO, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * Read client outvecs from the wrap output vector; the actual lengths are
     * updated later. It is a PROGRAMMER ERROR if the memory reference for
     * the wrap output vector is invalid or not read-write.
     */
    if (tfm_memory_check(outptr, out_num * sizeof(psa_outvec), ns_caller,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    spm_memset(invecs, 0, sizeof(invecs));
    spm_memset(outvecs, 0, sizeof(outvecs));

    /* Copy the addresses out to avoid TOCTOU attacks. */
    spm_memcpy(invecs, inptr, in_num * sizeof(psa_invec));
    spm_memcpy(outvecs, outptr, out_num * sizeof(psa_outvec));

    /*
     * For client input vectors, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not readable.
     */
    for (i = 0; i < in_num; i++) {
        if (tfm_memory_check(invecs[i].base, invecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RO, privileged)
            != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * Clients must never overlap input parameters because of the risk of a
     * double-fetch inconsistency.
     * Overflow is checked in the tfm_memory_check functions.
     */
    for (i = 0; i + 1 < in_num; i++) {
        for (j = i + 1; j < in_num; j++) {
            if (!((char *) invecs[j].base + invecs[j].len <=
                  (char *) invecs[i].base ||
                  (char *) invecs[j].base >=
                  (char *) invecs[i].base + invecs[i].len)) {
                return PSA_ERROR_PROGRAMMER_ERROR;
            }
        }
    }

    /*
     * For client output vectors, it is a PROGRAMMER ERROR if the provided
     * payload memory reference was invalid or not read-write.
     */
    for (i = 0; i < out_num; i++) {
        if (tfm_memory_check(outvecs[i].base, outvecs[i].len, ns_caller,
                             TFM_MEMORY_ACCESS_RW, privileged)
            != SPM_SUCCESS) {
            return PSA_ERROR_PROGRAMMER_ERROR;
        }
    }

    /*
     * FixMe: Need to check if the message is unrecognized by the RoT
     * Service or incorrectly formatted.
     */
    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);

    tfm_spm_fill_msg(msg, service, handle, type, client_id,
                     invecs, in_num, outvecs, out_num, outptr);

    return backend_instance.messaging(service, msg);
}
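
/*
 * Illustrative client-side call that ends up in tfm_spm_client_psa_call()
 * (a sketch only; EXAMPLE_SERVICE_HANDLE, "request" and "reply" are
 * placeholders, not part of this file): a client of a stateless RoT Service
 * calls psa_call() directly on the service's static handle, and the
 * client-side wrapper packs type/in_len/out_len into ctrl_param before
 * entering the SPM.
 *
 *     psa_invec in_vec[] = { { &request, sizeof(request) } };
 *     psa_outvec out_vec[] = { { &reply, sizeof(reply) } };
 *     psa_status_t status;
 *
 *     status = psa_call(EXAMPLE_SERVICE_HANDLE, PSA_IPC_CALL,
 *                       in_vec, 1, out_vec, 1);
 */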

/* The following PSA APIs are only needed by connection-based services */
#if CONFIG_TFM_CONNECTION_BASED_SERVICE_API == 1

psa_status_t tfm_spm_client_psa_connect(uint32_t sid, uint32_t version)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *connect_handle;
    int32_t client_id;
    psa_handle_t handle;
    bool ns_caller = tfm_spm_is_ns_caller();
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /*
     * It is a PROGRAMMER ERROR if the RoT Service does not exist on the
     * platform.
     */
    service = tfm_spm_get_service_by_sid(sid);
    if (!service) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /* It is a PROGRAMMER ERROR if connecting to a stateless service. */
    if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /*
     * It is a PROGRAMMER ERROR if the caller is not authorized to access the
     * RoT Service.
     */
    if (tfm_spm_check_authorization(sid, service, ns_caller) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    /*
     * It is a PROGRAMMER ERROR if the version of the RoT Service requested is
     * not supported on the platform.
     */
    if (tfm_spm_check_client_version(service, version) != SPM_SUCCESS) {
        return PSA_ERROR_CONNECTION_REFUSED;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    /*
     * Create the connection handle here since it is still possible to return
     * an error code to the client if creation fails.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    connect_handle = tfm_spm_create_conn_handle(service, client_id);
    CRITICAL_SECTION_LEAVE(cs_assert);
    if (!connect_handle) {
        return PSA_ERROR_CONNECTION_BUSY;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(connect_handle);

    handle = tfm_spm_to_user_handle(connect_handle);
    /* No input or output is needed for a connect message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_CONNECT,
                     client_id, NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}
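
/*
 * Illustrative client-side flow for a connection-based service (a sketch
 * only; EXAMPLE_SERVICE_SID and EXAMPLE_SERVICE_VERSION are placeholders):
 * the handle returned by psa_connect() is used for psa_call() and released
 * with psa_close(), which map onto the SPM functions in this block.
 *
 *     psa_handle_t handle = psa_connect(EXAMPLE_SERVICE_SID,
 *                                       EXAMPLE_SERVICE_VERSION);
 *     if (PSA_HANDLE_IS_VALID(handle)) {
 *         (void)psa_call(handle, PSA_IPC_CALL, NULL, 0, NULL, 0);
 *         psa_close(handle);
 *     }
 */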

psa_status_t tfm_spm_client_psa_close(psa_handle_t handle)
{
    struct service_t *service;
    struct tfm_msg_body_t *msg;
    struct tfm_conn_handle_t *conn_handle;
    int32_t client_id;
    bool ns_caller = tfm_spm_is_ns_caller();

    /* It has no effect if called with the NULL handle */
    if (handle == PSA_NULL_HANDLE) {
        return PSA_SUCCESS;
    }

    /* It is a PROGRAMMER ERROR if called with a stateless handle. */
    if (IS_STATIC_HANDLE(handle)) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    client_id = tfm_spm_get_client_id(ns_caller);

    conn_handle = tfm_spm_to_handle_instance(handle);
    /*
     * It is a PROGRAMMER ERROR if an invalid handle was provided that is not
     * the null handle.
     */
    if (tfm_spm_validate_conn_handle(conn_handle, client_id) != SPM_SUCCESS) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    service = conn_handle->internal_msg.service;
    if (!service) {
        /* FixMe: Need to implement a mechanism to resolve this failure. */
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    msg = tfm_spm_get_msg_buffer_from_conn_handle(conn_handle);

    /*
     * It is a PROGRAMMER ERROR if the connection is currently handling a
     * request.
     */
    if (conn_handle->status == TFM_HANDLE_STATUS_ACTIVE) {
        return PSA_ERROR_PROGRAMMER_ERROR;
    }

    /* No input or output is needed for a close message */
    tfm_spm_fill_msg(msg, service, handle, PSA_IPC_DISCONNECT, client_id,
                     NULL, 0, NULL, 0, NULL);

    return backend_instance.messaging(service, msg);
}

#endif /* CONFIG_TFM_CONNECTION_BASED_SERVICE_API */

/* PSA Partition API function body */

psa_signal_t tfm_spm_partition_psa_wait(psa_signal_t signal_mask,
                                        uint32_t timeout)
{
    struct partition_t *partition = NULL;

    /*
     * Timeout[30:0] is reserved for future use.
     * SPM must ignore the value of RES.
     */
    timeout &= PSA_TIMEOUT_MASK;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a PROGRAMMER ERROR if the signal_mask does not include any
     * assigned signals.
     */
    if ((partition->signals_allowed & signal_mask) == 0) {
        tfm_core_panic();
    }

    /*
     * thrd_wait_on() blocks the caller thread if no signals are available.
     * In this case, the return value of this function is temporarily set in
     * the runtime context. After new signal(s) become available, the return
     * value is updated with the available signal(s) and the blocked thread
     * gets to run.
     */
    if (timeout == PSA_BLOCK &&
        (partition->signals_asserted & signal_mask) == 0) {
        partition->signals_waiting = signal_mask;
        thrd_wait_on(&partition->waitobj, CURRENT_THREAD);
    }

    return partition->signals_asserted & signal_mask;
}
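
/*
 * Illustrative partition-side usage (a sketch only; EXAMPLE_SIGNAL is a
 * placeholder for a signal generated for the partition): a Secure Partition
 * typically blocks in psa_wait() and then retrieves the pending message with
 * psa_get() before handling it.
 *
 *     psa_signal_t signals;
 *     psa_msg_t msg;
 *
 *     while (1) {
 *         signals = psa_wait(PSA_WAIT_ANY, PSA_BLOCK);
 *         if (signals & EXAMPLE_SIGNAL) {
 *             if (psa_get(EXAMPLE_SIGNAL, &msg) == PSA_SUCCESS) {
 *                 ... handle msg.type, then psa_reply(msg.handle, status) ...
 *             }
 *         }
 *     }
 */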

psa_status_t tfm_spm_partition_psa_get(psa_signal_t signal, psa_msg_t *msg)
{
    struct tfm_msg_body_t *tmp_msg = NULL;
    struct partition_t *partition = NULL;
    uint32_t privileged;

    /*
     * Only one message can be retrieved at a time by psa_get(). It is a
     * fatal error if the input signal has more than one signal bit set.
     */
    if (!IS_ONLY_ONE_BIT_IN_UINT32(signal)) {
        tfm_core_panic();
    }

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }
    privileged = GET_PARTITION_PRIVILEGED_MODE(partition->p_ldinf);

    /*
     * Write the message to the service buffer. It is a fatal error if the
     * input msg pointer is not a valid memory reference or not read-write.
     */
    if (tfm_memory_check(msg, sizeof(psa_msg_t), false, TFM_MEMORY_ACCESS_RW,
                         privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the caller calls psa_get() when no message has
     * been set. The caller must call this function after an RoT Service
     * signal is returned by psa_wait().
     */
    if (partition->signals_asserted == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the RoT Service signal is not currently asserted.
     */
    if ((partition->signals_asserted & signal) == 0) {
        tfm_core_panic();
    }

    /*
     * Get the message by signal from the partition. It is a fatal error if
     * this fails, which means the input signal does not correspond to an RoT
     * Service.
     */
    tmp_msg = tfm_spm_get_msg_by_signal(partition, signal);
    if (!tmp_msg) {
        return PSA_ERROR_DOES_NOT_EXIST;
    }

    (TO_CONTAINER(tmp_msg,
                  struct tfm_conn_handle_t,
                  internal_msg))->status = TFM_HANDLE_STATUS_ACTIVE;

    spm_memcpy(msg, &tmp_msg->msg, sizeof(psa_msg_t));

    return PSA_SUCCESS;
}

size_t tfm_spm_partition_psa_read(psa_handle_t msg_handle, uint32_t invec_idx,
                                  void *buffer, size_t num_bytes)
{
    size_t bytes;
    struct tfm_msg_body_t *msg = NULL;
    uint32_t priv_mode;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    priv_mode = GET_PARTITION_PRIVILEGED_MODE(msg->service->partition->p_ldinf);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * Copy the client data to the service buffer. It is a fatal error
     * if the memory reference for buffer is invalid or not read-write.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RW, priv_mode) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    bytes = num_bytes > msg->msg.in_size[invec_idx] ?
            msg->msg.in_size[invec_idx] : num_bytes;

    spm_memcpy(buffer, msg->invec[invec_idx].base, bytes);

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base + bytes;
    msg->msg.in_size[invec_idx] -= bytes;

    return bytes;
}

size_t tfm_spm_partition_psa_skip(psa_handle_t msg_handle, uint32_t invec_idx,
                                  size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* There is no remaining data in this input vector */
    if (msg->msg.in_size[invec_idx] == 0) {
        return 0;
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE));
#endif

    /*
     * If num_bytes is greater than the remaining size of the input vector,
     * the remaining size of the input vector is used.
     */
    if (num_bytes > msg->msg.in_size[invec_idx]) {
        num_bytes = msg->msg.in_size[invec_idx];
    }

    /* There may be some remaining data */
    msg->invec[invec_idx].base = (char *)msg->invec[invec_idx].base +
                                 num_bytes;
    msg->msg.in_size[invec_idx] -= num_bytes;

    return num_bytes;
}

void tfm_spm_partition_psa_write(psa_handle_t msg_handle, uint32_t outvec_idx,
                                 const void *buffer, size_t num_bytes)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t priv_mode;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    priv_mode = GET_PARTITION_PRIVILEGED_MODE(msg->service->partition->p_ldinf);

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the call attempts to write data past the end of
     * the client output vector
     */
    if (num_bytes > msg->msg.out_size[outvec_idx] -
        msg->outvec[outvec_idx].len) {
        tfm_core_panic();
    }

#if PSA_FRAMEWORK_HAS_MM_IOVEC
    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE));
#endif

    /*
     * Copy the service buffer to the client outvec. It is a fatal error
     * if the memory reference for buffer is invalid or not readable.
     */
    if (tfm_memory_check(buffer, num_bytes, false,
                         TFM_MEMORY_ACCESS_RO, priv_mode) != SPM_SUCCESS) {
        tfm_core_panic();
    }

    spm_memcpy((char *)msg->outvec[outvec_idx].base +
               msg->outvec[outvec_idx].len, buffer, num_bytes);

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len += num_bytes;
}
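
/*
 * Illustrative RoT Service handler for a request message (a sketch only;
 * the "example_req_t"/"example_rsp_t" types and handle_request() are
 * placeholders): the service drains the client input with psa_read() and
 * produces output with psa_write() before replying.
 *
 *     static void handle_request(const psa_msg_t *msg)
 *     {
 *         struct example_req_t req;
 *         struct example_rsp_t rsp;
 *
 *         if (psa_read(msg->handle, 0, &req, sizeof(req)) != sizeof(req)) {
 *             psa_reply(msg->handle, PSA_ERROR_PROGRAMMER_ERROR);
 *             return;
 *         }
 *         ... process req into rsp ...
 *         psa_write(msg->handle, 0, &rsp, sizeof(rsp));
 *         psa_reply(msg->handle, PSA_SUCCESS);
 *     }
 */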

int32_t tfm_spm_partition_psa_reply(psa_handle_t msg_handle,
                                    psa_status_t status)
{
    struct service_t *service = NULL;
    struct tfm_msg_body_t *msg = NULL;
    int32_t ret = PSA_SUCCESS;
    struct tfm_conn_handle_t *conn_handle;
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * The RoT Service information needed by this function is stored in the
     * message body structure. Only two parameters are passed to this function
     * (handle and status), so this is the simplest way to retrieve it.
     */
    service = msg->service;
    if (!service) {
        tfm_core_panic();
    }

    /*
     * Three types of message are passed to this function: CONNECTION, REQUEST
     * and DISCONNECTION. Each type needs to be processed differently.
     */
    conn_handle = tfm_spm_to_handle_instance(msg_handle);
    switch (msg->msg.type) {
    case PSA_IPC_CONNECT:
        /*
         * Reply to a PSA_IPC_CONNECT message. The connection handle is
         * returned if the input status is PSA_SUCCESS. Other return values
         * are based on the input status.
         */
        if (status == PSA_SUCCESS) {
            ret = msg_handle;
        } else if (status == PSA_ERROR_CONNECTION_REFUSED) {
            /* Refuse the client connection, indicating a permanent error. */
            tfm_spm_free_conn_handle(service, conn_handle);
            ret = PSA_ERROR_CONNECTION_REFUSED;
        } else if (status == PSA_ERROR_CONNECTION_BUSY) {
            /* Fail the client connection, indicating a transient error. */
            ret = PSA_ERROR_CONNECTION_BUSY;
        } else {
            tfm_core_panic();
        }
        break;
    case PSA_IPC_DISCONNECT:
        /* The service handle is not used anymore */
        tfm_spm_free_conn_handle(service, conn_handle);

        /*
         * If the message type is PSA_IPC_DISCONNECT, then the status code is
         * ignored
         */
        break;
    default:
        if (msg->msg.type >= PSA_IPC_CALL) {

#if PSA_FRAMEWORK_HAS_MM_IOVEC

            /*
             * If the unmap function is not called for an input/output vector
             * that has been mapped, the framework removes the mapping.
             */
            int i;

            for (i = 0; i < PSA_MAX_IOVEC * 2; i++) {
                if (IOVEC_IS_MAPPED(msg, i) && (!IOVEC_IS_UNMAPPED(msg, i))) {
                    SET_IOVEC_UNMAPPED(msg, i);
                    /*
                     * Any output vectors that are still mapped will report
                     * that zero bytes have been written.
                     */
                    if (i >= OUTVEC_IDX_BASE) {
                        msg->outvec[i - OUTVEC_IDX_BASE].len = 0;
                    }
                }
            }

#endif
            /* Reply to a request message. Return values are based on status */
            ret = status;
            /*
             * The total number of bytes written to a single parameter must be
             * reported to the client by updating the len member of the
             * psa_outvec structure for the parameter before returning from
             * psa_call().
             */
            update_caller_outvec_len(msg);
            if (SERVICE_IS_STATELESS(service->p_ldinf->flags)) {
                tfm_spm_free_conn_handle(service, conn_handle);
            }
        } else {
            tfm_core_panic();
        }
    }

    if (ret == PSA_ERROR_PROGRAMMER_ERROR) {
        /*
         * If the source of the programmer error is a Secure Partition, the
         * SPM must panic the Secure Partition in response to a PROGRAMMER
         * ERROR.
         */
        if (TFM_CLIENT_ID_IS_NS(msg->msg.client_id)) {
            conn_handle->status = TFM_HANDLE_STATUS_CONNECT_ERROR;
        } else {
            tfm_core_panic();
        }
    } else {
        conn_handle->status = TFM_HANDLE_STATUS_IDLE;
    }

    /*
     * TODO: This can be optimized further by moving the critical section
     * protection to the mailbox. The implementation also needs to be checked
     * when a secure context is involved.
     */
    CRITICAL_SECTION_ENTER(cs_assert);
    ret = backend_instance.replying(msg, ret);
    CRITICAL_SECTION_LEAVE(cs_assert);

    return ret;
}
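
/*
 * Illustrative reply usage for connection messages (a sketch only;
 * can_accept_more_clients() is a hypothetical helper): a service accepts or
 * refuses a connection by passing the corresponding status to psa_reply(),
 * which is routed to the PSA_IPC_CONNECT case above.
 *
 *     case PSA_IPC_CONNECT:
 *         if (can_accept_more_clients()) {
 *             psa_reply(msg.handle, PSA_SUCCESS);
 *         } else {
 *             psa_reply(msg.handle, PSA_ERROR_CONNECTION_REFUSED);
 *         }
 *         break;
 */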

void tfm_spm_partition_psa_notify(int32_t partition_id)
{
    struct partition_t *p_pt = tfm_spm_get_partition_by_id(partition_id);

    spm_assert_signal(p_pt, PSA_DOORBELL);
}

void tfm_spm_partition_psa_clear(void)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the Secure Partition's doorbell signal is not
     * currently asserted.
     */
    if ((partition->signals_asserted & PSA_DOORBELL) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~PSA_DOORBELL;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

void tfm_spm_partition_psa_eoi(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info = NULL;
    struct partition_t *partition = NULL;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    /* It is a fatal error if the passed signal is not an interrupt signal. */
    if (!irq_info) {
        tfm_core_panic();
    }

    if (irq_info->flih_func) {
        /* This API is for SLIH IRQs only */
        tfm_core_panic();
    }

    /* It is a fatal error if the passed signal is not currently asserted */
    if ((partition->signals_asserted & irq_signal) == 0) {
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);

    tfm_hal_irq_clear_pending(irq_info->source);
    tfm_hal_irq_enable(irq_info->source);
}
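
/*
 * Illustrative SLIH interrupt handling in a partition (a sketch only;
 * EXAMPLE_IRQ_SIGNAL is a placeholder): for SLIH interrupts, the partition
 * waits for the interrupt signal and calls psa_eoi() when the interrupt has
 * been serviced, which clears the pending state and re-enables the line as
 * implemented above.
 *
 *     signals = psa_wait(EXAMPLE_IRQ_SIGNAL, PSA_BLOCK);
 *     if (signals & EXAMPLE_IRQ_SIGNAL) {
 *         ... service the device ...
 *         psa_eoi(EXAMPLE_IRQ_SIGNAL);
 *     }
 */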

void tfm_spm_partition_psa_panic(void)
{
    /*
     * PSA FF recommends that the SPM causes the system to restart when a
     * Secure Partition panics.
     */
    tfm_hal_system_reset();
}

void tfm_spm_partition_psa_irq_enable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_enable(irq_info->source);
}

psa_irq_status_t tfm_spm_partition_psa_irq_disable(psa_signal_t irq_signal)
{
    struct partition_t *partition;
    struct irq_load_info_t *irq_info;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    tfm_hal_irq_disable(irq_info->source);

    return 1;
}

void tfm_spm_partition_psa_reset_signal(psa_signal_t irq_signal)
{
    struct critical_section_t cs_assert = CRITICAL_SECTION_STATIC_INIT;
    struct irq_load_info_t *irq_info;
    struct partition_t *partition;

    partition = tfm_spm_get_running_partition();
    if (!partition) {
        tfm_core_panic();
    }

    irq_info = get_irq_info_for_signal(partition->p_ldinf, irq_signal);
    if (!irq_info) {
        tfm_core_panic();
    }

    if (!irq_info->flih_func) {
        /* This API is for FLIH IRQs only */
        tfm_core_panic();
    }

    if ((partition->signals_asserted & irq_signal) == 0) {
        /* The signal is not asserted */
        tfm_core_panic();
    }

    CRITICAL_SECTION_ENTER(cs_assert);
    partition->signals_asserted &= ~irq_signal;
    CRITICAL_SECTION_LEAVE(cs_assert);
}

/* psa_set_rhandle is only needed by connection-based services */
#if CONFIG_TFM_CONNECTION_BASED_SERVICE_API == 1

void tfm_spm_partition_psa_set_rhandle(psa_handle_t msg_handle, void *rhandle)
{
    struct tfm_msg_body_t *msg = NULL;
    struct tfm_conn_handle_t *conn_handle;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /* It is a PROGRAMMER ERROR if a stateless service sets rhandle. */
    if (SERVICE_IS_STATELESS(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    msg->msg.rhandle = rhandle;
    conn_handle = tfm_spm_to_handle_instance(msg_handle);

    /* Store the reverse handle for subsequent client calls. */
    tfm_spm_set_rhandle(msg->service, conn_handle, rhandle);
}

#endif /* CONFIG_TFM_CONNECTION_BASED_SERVICE_API */

#if PSA_FRAMEWORK_HAS_MM_IOVEC

const void *tfm_spm_partition_psa_map_invec(psa_handle_t msg_handle,
                                            uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = GET_PARTITION_PRIVILEGED_MODE(partition->p_ldinf);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the input vector has length zero. */
    if (msg->msg.in_size[invec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been mapped using
     * psa_map_invec().
     */
    if (IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been accessed
     * using psa_read() or psa_skip().
     */
    if (IOVEC_IS_ACCESSED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the memory reference for the wrap input vector
     * is invalid or not readable.
     */
    if (tfm_memory_check(msg->invec[invec_idx].base, msg->invec[invec_idx].len,
                         false, TFM_MEMORY_ACCESS_RO, privileged)
        != SPM_SUCCESS) {
        tfm_core_panic();
    }

    SET_IOVEC_MAPPED(msg, (invec_idx + INVEC_IDX_BASE));

    return msg->invec[invec_idx].base;
}

void tfm_spm_partition_psa_unmap_invec(psa_handle_t msg_handle,
                                       uint32_t invec_idx)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if invec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (invec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has not been mapped by a call
     * to psa_map_invec().
     */
    if (!IOVEC_IS_MAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the input vector has already been unmapped by a
     * call to psa_unmap_invec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (invec_idx + INVEC_IDX_BASE));
}
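
/*
 * Illustrative MM-IOVEC usage by an RoT Service with MM-IOVEC enabled (a
 * sketch only; "bytes_written" is a placeholder): instead of copying with
 * psa_read()/psa_write(), the service maps the client vectors, accesses them
 * in place, and unmaps them before replying.
 *
 *     const uint8_t *in = psa_map_invec(msg.handle, 0);
 *     uint8_t *out = psa_map_outvec(msg.handle, 0);
 *
 *     ... process msg.in_size[0] bytes from "in" into "out" ...
 *
 *     psa_unmap_invec(msg.handle, 0);
 *     psa_unmap_outvec(msg.handle, 0, bytes_written);
 *     psa_reply(msg.handle, PSA_SUCCESS);
 */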

void *tfm_spm_partition_psa_map_outvec(psa_handle_t msg_handle,
                                       uint32_t outvec_idx)
{
    struct tfm_msg_body_t *msg = NULL;
    uint32_t privileged;
    struct partition_t *partition = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    partition = msg->service->partition;
    privileged = GET_PARTITION_PRIVILEGED_MODE(partition->p_ldinf);

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /* It is a fatal error if the output vector has length zero. */
    if (msg->msg.out_size[outvec_idx] == 0) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been mapped using
     * psa_map_outvec().
     */
    if (IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been accessed
     * using psa_write().
     */
    if (IOVEC_IS_ACCESSED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector is invalid or not read-write.
     */
    if (tfm_memory_check(msg->outvec[outvec_idx].base,
                         msg->outvec[outvec_idx].len, false,
                         TFM_MEMORY_ACCESS_RW, privileged) != SPM_SUCCESS) {
        tfm_core_panic();
    }
    SET_IOVEC_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    return msg->outvec[outvec_idx].base;
}

void tfm_spm_partition_psa_unmap_outvec(psa_handle_t msg_handle,
                                        uint32_t outvec_idx, size_t len)
{
    struct tfm_msg_body_t *msg = NULL;

    /* It is a fatal error if the message handle is invalid */
    msg = tfm_spm_get_msg_from_handle(msg_handle);
    if (!msg) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if MM-IOVEC has not been enabled for the RoT
     * Service that received the message.
     */
    if (!SERVICE_ENABLED_MM_IOVEC(msg->service->p_ldinf->flags)) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the message handle does not refer to a request
     * message.
     */
    if (msg->msg.type < PSA_IPC_CALL) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if outvec_idx is equal to or greater than
     * PSA_MAX_IOVEC.
     */
    if (outvec_idx >= PSA_MAX_IOVEC) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if len is greater than the output vector size.
     */
    if (len > msg->msg.out_size[outvec_idx]) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has not been mapped by a call
     * to psa_map_outvec().
     */
    if (!IOVEC_IS_MAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    /*
     * It is a fatal error if the output vector has already been unmapped by a
     * call to psa_unmap_outvec().
     */
    if (IOVEC_IS_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE))) {
        tfm_core_panic();
    }

    SET_IOVEC_UNMAPPED(msg, (outvec_idx + OUTVEC_IDX_BASE));

    /* Update the number of bytes written */
    msg->outvec[outvec_idx].len = len;
}

#endif /* PSA_FRAMEWORK_HAS_MM_IOVEC */