/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <app.h>
#include <arch.h>
#include <arch_features.h>
#include <buffer.h>
#include <debug.h>
#include <dev.h>
#include <dev_assign_app.h>
#include <feature.h>
#include <granule.h>
#include <sizes.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <string.h>
#include <utils_def.h>

/*
 * This function is only invoked when PDEV creation fails or when the PDEV is
 * being destroyed. Hence the PDEV is not in use when this function is called,
 * and therefore no lock is acquired before its invocation.
 */
static void pdev_restore_aux_granules_state(struct granule *pdev_aux[],
					    unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_pdev_aux = pdev_aux[i];

		granule_lock(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		if (scrub) {
			buffer_granule_memzero(g_pdev_aux,
				(enum buffer_slot)((unsigned int)SLOT_PDEV_AUX0 + i));
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_DELEGATED);
	}
}

/*
 * TODO:
 * Validate device-specific PDEV parameters by traversing all previously
 * created PDEVs and checking them against the current PDEV parameters. This
 * implements RmiPdevParamsIsValid of the RMM specification.
 */
static int validate_rmi_pdev_params(struct rmi_pdev_params *pd_params)
{
	(void)pd_params;
	/*
	 * Check if the device identifier, Root Port identifier, IDE stream
	 * identifier and RID range are valid.
	 */

	/*
	 * Check if the device identifier is not equal to the device
	 * identifier of another PDEV.
	 */

	/* Whether the RID range does not overlap the RID range of another PDEV */

	/*
	 * Every address range falls within an MMIO range permitted by the
	 * system.
	 */

	/*
	 * None of the address ranges overlaps another address range for this
	 * PDEV.
	 */

	return 0;
}
74
Arunachalam Ganapathy8f6f6262024-10-10 11:42:00 +010075static unsigned long pdev_get_aux_count_from_flags(unsigned long pdev_flags)
76{
77 unsigned long aux_count;
78
79 (void)pdev_flags;
80
81 /*
82 * The current implementation requires that RMI_PDEV_SPDM_TRUE
83 * is set in the flags.
84 */
85 assert(EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_flags) == RMI_PDEV_SPDM_TRUE);
86
87 /*
88 * Currently, the number of pages required to instantiate an app is
89 * hardcoded in the app header. In this implementation, aux_count
90 * does not depend on the flags set in pdev_flags. The worst case
91 * (i.e., the most granules) is assumed.
92 */
93 aux_count = app_get_required_granule_count(RMM_DEV_ASSIGN_APP_ID);
94 assert(aux_count <= PDEV_PARAM_AUX_GRANULES_MAX);
95
96 return aux_count;
97}

/*
 * smc_pdev_aux_count
 *
 * Get the number of auxiliary granules required for a PDEV.
 *
 * flags - PDEV flags
 * res - SMC result
 */
void smc_pdev_aux_count(unsigned long flags, struct smc_result *res)
{
	if (is_rmi_feat_da_enabled()) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = pdev_get_aux_count_from_flags(flags);
	} else {
		res->x[0] = SMC_NOT_SUPPORTED;
	}
}

/*
 * smc_pdev_create
 *
 * Create a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * pdev_params_ptr - PA of PDEV parameters
 */
unsigned long smc_pdev_create(unsigned long pdev_ptr,
			      unsigned long pdev_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pdev_params;
	struct pdev *pd;
	struct rmi_pdev_params pdev_params; /* this consumes 4k of stack */
	struct granule *pdev_aux_granules[PDEV_PARAM_AUX_GRANULES_MAX];
	unsigned long num_aux_req;
	bool ns_access_ok;
	void *aux_mapped_addr;
	struct dev_assign_params dparams;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) ||
	    !GRANULE_ALIGNED(pdev_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy PDEV parameters */
	g_pdev_params = find_granule(pdev_params_ptr);
	if ((g_pdev_params == NULL) ||
	    (granule_unlocked_state(g_pdev_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pdev_params, 0U,
				      sizeof(struct rmi_pdev_params),
				      &pdev_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Validate RmiPdevFlags. RMM supports a PCIe off-chip device
	 * represented by the flags: SPDM=true, IDE=true, COHERENT=false,
	 * P2P=false.
	 */
	/* coverity[uninit_use:SUPPRESS] */
	if ((EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_params.flags) !=
	     RMI_PDEV_SPDM_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) !=
	     RMI_PDEV_IDE_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_COHERENT, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_P2P, pdev_params.flags) !=
	     RMI_PDEV_P2P_FALSE)) {
		return RMI_ERROR_NOT_SUPPORTED;
	}

	/* Validate PDEV parameters num_aux */
	num_aux_req = pdev_get_aux_count_from_flags(pdev_params.flags);
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.num_aux == 0U) ||
	    (pdev_params.num_aux != num_aux_req)) {
		ERROR("ERROR: PDEV needs %lu aux granules, host allocated %lu.\n",
		      num_aux_req, pdev_params.num_aux);
		return RMI_ERROR_INPUT;
	}

	/* Validate PDEV parameters ncoh_num_addr_range. */
	/* coverity[uninit_use:SUPPRESS] */
	if (pdev_params.ncoh_num_addr_range > PDEV_PARAM_NCOH_ADDR_RANGE_MAX) {
		return RMI_ERROR_INPUT;
	}

	/* Validate hash algorithm */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.hash_algo != RMI_HASH_SHA_256) &&
	    (pdev_params.hash_algo != RMI_HASH_SHA_512)) {
		return RMI_ERROR_INPUT;
	}

	/* cppcheck-suppress knownConditionTrueFalse */
	if (validate_rmi_pdev_params(&pdev_params) != 0) {
		return RMI_ERROR_INPUT;
	}

	/* Loop through pdev_aux_granules and transit them */
	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		struct granule *g_pdev_aux;

		/* coverity[uninit_use_in_call:SUPPRESS] */
		g_pdev_aux = find_lock_granule(pdev_params.aux[i],
					       GRANULE_STATE_DELEGATED);
		if (g_pdev_aux == NULL) {
			pdev_restore_aux_granules_state(pdev_aux_granules, i,
							false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		pdev_aux_granules[i] = g_pdev_aux;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_DELEGATED);
	if (g_pdev == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_restore_pdev_aux_granule_state;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
		goto out_restore_pdev_aux_granule_state;
	}

	/* Map all PDEV aux granules to slot starting SLOT_PDEV_AUX0 */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pdev_aux_granules,
					(unsigned int)pdev_params.num_aux);
	if (aux_mapped_addr == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_unmap_pdev_slot_buffer;
	}

	/* Call init routine to initialize device class specific state */
	dparams.dev_handle = (void *)pd;
	dparams.rmi_hash_algo = pdev_params.hash_algo;
	dparams.cert_slot_id = (uint8_t)pdev_params.cert_id;

	if (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) ==
	    RMI_PDEV_IDE_TRUE) {
		dparams.has_ide = true;
		dparams.ecam_addr = pdev_params.ecam_addr;
		dparams.rp_id = pdev_params.root_id;
		dparams.ide_sid = pdev_params.ide_sid;
	} else {
		dparams.has_ide = false;
	}

	/* Use the PDEV aux pages for the DA app */
	uintptr_t granule_pas[PDEV_PARAM_AUX_GRANULES_MAX];

	for (unsigned int i = 0; i < pdev_params.num_aux; ++i) {
		granule_pas[i] = granule_addr(pdev_aux_granules[i]);
	}

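	/*
	 * Initialize the device assignment app instance for this PDEV. The
	 * app uses the PDEV aux granules, passed here by PA, as its working
	 * memory for the lifetime of the PDEV.
	 */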
	rc = dev_assign_app_init(&pd->da_app_data,
				 granule_pas,
				 pdev_params.num_aux,
				 aux_mapped_addr, &dparams);

	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		/* Initialize PDEV */
		pd->g_pdev = g_pdev;
		pd->rmi_state = RMI_PDEV_STATE_NEW;
		pd->rmi_flags = pdev_params.flags;
		pd->num_vdevs = 0;
		pd->rmi_hash_algo = pdev_params.hash_algo;
		pd->num_aux = (unsigned int)pdev_params.num_aux;
		(void)memcpy((void *)pd->g_aux, (void *)pdev_aux_granules,
			     pdev_params.num_aux * sizeof(struct granule *));

		/* Initialize PDEV communication state */
		pd->dev_comm_state = DEV_COMM_PENDING;

		/* Initialize PDEV PCIe device */
		pd->dev.bdf = pdev_params.pdev_id;
		pd->dev.segment_id = pdev_params.segment_id;
		pd->dev.ecam_addr = pdev_params.ecam_addr;
		pd->dev.root_id = pdev_params.root_id;
		pd->dev.cert_slot_id = pdev_params.cert_id;
		pd->dev.ide_sid = pdev_params.ide_sid;
		pd->dev.rid_base = pdev_params.rid_base;
		pd->dev.rid_top = pdev_params.rid_top;
		pd->dev.ncoh_num_addr_range = pdev_params.ncoh_num_addr_range;
		(void)memcpy(&pd->dev.ncoh_addr_range,
			     &pdev_params.ncoh_addr_range,
			     (sizeof(struct rmi_address_range) *
			      pdev_params.ncoh_num_addr_range));

		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_INPUT;
	}

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, (unsigned int)pdev_params.num_aux);

out_unmap_pdev_slot_buffer:
	/* Unmap PDEV buffer from slot PDEV */
	buffer_unmap(pd);

	/*
	 * On success, unlock and transit the PDEV granule state to
	 * GRANULE_STATE_PDEV, else unlock and retain the state at
	 * GRANULE_STATE_DELEGATED.
	 */
	if (smc_rc == RMI_SUCCESS) {
		granule_unlock_transition(g_pdev, GRANULE_STATE_PDEV);
	} else {
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
	}

out_restore_pdev_aux_granule_state:
	if (smc_rc != RMI_SUCCESS) {
		/*
		 * Transit all PDEV AUX granule state back to
		 * GRANULE_STATE_DELEGATED
		 */
		pdev_restore_aux_granules_state(pdev_aux_granules,
				(unsigned int)pdev_params.num_aux, false);
	}

	return smc_rc;
}
/* Validate RmiDevCommData.RmiDevCommEnter argument passed by Host */
static unsigned long copyin_and_validate_dev_comm_enter(
					unsigned long dev_comm_data_ptr,
					struct rmi_dev_comm_enter *enter_args,
					unsigned int dev_comm_state)
{
	struct granule *g_dev_comm_data;
	struct granule *g_buf;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_dev_comm_data,
				      RMI_DEV_COMM_ENTER_OFFSET,
				      sizeof(struct rmi_dev_comm_enter),
				      enter_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	if (!GRANULE_ALIGNED(enter_args->req_addr) ||
	    !GRANULE_ALIGNED(enter_args->resp_addr) ||
	    (enter_args->resp_len > GRANULE_SIZE)) {
		return RMI_ERROR_INPUT;
	}

	if ((enter_args->status == RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	    (enter_args->resp_len == 0U)) {
		return RMI_ERROR_INPUT;
	}

	/* Check if request and response buffers are in NS PAS */
	g_buf = find_granule(enter_args->req_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	g_buf = find_granule(enter_args->resp_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

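	/*
	 * The Host may report a response (or an error) only while a device
	 * communication is active, and must enter with status NONE when a
	 * new communication is pending.
	 */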
	if ((dev_comm_state == DEV_COMM_ACTIVE) &&
	    ((enter_args->status != RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	     (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_ERROR))) {
		return RMI_ERROR_DEVICE;
	}

	if ((dev_comm_state == DEV_COMM_PENDING) &&
	    (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_NONE)) {
		return RMI_ERROR_DEVICE;
	}

	return RMI_SUCCESS;
}

/*
 * copyout DevCommExitArgs
 */
static unsigned long copyout_dev_comm_exit(unsigned long dev_comm_data_ptr,
					   struct rmi_dev_comm_exit *exit_args)
{
	struct granule *g_dev_comm_data;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_write(SLOT_NS, g_dev_comm_data,
				       RMI_DEV_COMM_EXIT_OFFSET,
				       sizeof(struct rmi_dev_comm_exit),
				       exit_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	return RMI_SUCCESS;
}

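/*
 * Dispatch a device communication request to the device assignment app. An
 * active communication is resumed; otherwise the PDEV state selects the
 * operation: NEW initiates the SPDM connection, HAS_KEY establishes the
 * secure session and STOPPING terminates the connection.
 */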
static int pdev_dispatch_cmd(struct pdev *pd, struct rmi_dev_comm_enter *enter_args,
			     struct rmi_dev_comm_exit *exit_args)
{
	int rc;
	struct dev_obj_digest *comm_digest_ptr;

	if (pd->rmi_state == RMI_PDEV_STATE_NEW) {
		comm_digest_ptr = &pd->cert_digest;
	} else {
		comm_digest_ptr = NULL;
	}

	if (pd->dev_comm_state == DEV_COMM_ACTIVE) {
		return dev_assign_dev_communicate(&pd->da_app_data, enter_args,
			exit_args, comm_digest_ptr, DEVICE_ASSIGN_APP_FUNC_ID_RESUME);
	}

	switch (pd->rmi_state) {
	case RMI_PDEV_STATE_NEW:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
			exit_args, comm_digest_ptr, DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT);
		break;
	case RMI_PDEV_STATE_HAS_KEY:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
			exit_args, comm_digest_ptr, DEVICE_ASSIGN_APP_FUNC_ID_SECURE_SESSION);
		break;
	case RMI_PDEV_STATE_STOPPING:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
			exit_args, comm_digest_ptr, DEVICE_ASSIGN_APP_FUNC_ID_STOP_CONNECTION);
		break;
	default:
		assert(false);
		rc = -1;
	}

	return rc;
}

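/*
 * Run a single step of device communication: copy in and validate the
 * Host-provided RmiDevCommEnter arguments, invoke the device assignment app
 * with the PDEV aux granules mapped, copy out the RmiDevCommExit results and
 * update the device communication state.
 */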
static unsigned long dev_communicate(struct pdev *pd,
				     unsigned long dev_comm_data_ptr)
{
	struct rmi_dev_comm_enter enter_args;
	struct rmi_dev_comm_exit exit_args;
	void *aux_mapped_addr;
	unsigned long comm_rc;
	int rc;

	assert(pd != NULL);

	if ((pd->dev_comm_state == DEV_COMM_IDLE) ||
	    (pd->dev_comm_state == DEV_COMM_ERROR)) {
		return RMI_ERROR_DEVICE;
	}

	/* Validate RmiDevCommEnter arguments in DevCommData */
	/* coverity[uninit_use_in_call:SUPPRESS] */
	comm_rc = copyin_and_validate_dev_comm_enter(dev_comm_data_ptr, &enter_args,
						     pd->dev_comm_state);
	if (comm_rc != RMI_SUCCESS) {
		return comm_rc;
	}

	/* Map PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	rc = pdev_dispatch_cmd(pd, &enter_args, &exit_args);

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	comm_rc = copyout_dev_comm_exit(dev_comm_data_ptr, &exit_args);
	if (comm_rc != RMI_SUCCESS) {
		/* todo: device status is updated but copyout data failed? */
		return RMI_ERROR_INPUT;
	}

	/*
	 * Based on the device communication result, update the device
	 * communication state.
	 */
	switch (rc) {
	case DEV_ASSIGN_STATUS_COMM_BLOCKED:
		pd->dev_comm_state = DEV_COMM_ACTIVE;
		break;
	case DEV_ASSIGN_STATUS_ERROR:
		pd->dev_comm_state = DEV_COMM_ERROR;
		break;
	case DEV_ASSIGN_STATUS_SUCCESS:
		pd->dev_comm_state = DEV_COMM_IDLE;
		break;
	default:
		assert(false);
	}

	return RMI_SUCCESS;
}

/*
 * smc_pdev_communicate
 *
 * pdev_ptr - PA of the PDEV
 * dev_comm_data_ptr - PA of the communication data structure
 */
unsigned long smc_pdev_communicate(unsigned long pdev_ptr,
				   unsigned long dev_comm_data_ptr)
{
	struct granule *g_pdev;
	struct pdev *pd;
	unsigned long rmi_rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) || !GRANULE_ALIGNED(dev_comm_data_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	assert(pd->g_pdev == g_pdev);

	rmi_rc = dev_communicate(pd, dev_comm_data_ptr);

	/*
	 * Based on the resulting device communication state, update the
	 * PDEV state.
	 */
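	/*
	 * State transitions driven by the communication outcome:
	 *   NEW      -> NEEDS_KEY (communication completed)
	 *   HAS_KEY  -> READY     (communication completed)
	 *   STOPPING -> STOPPED   (completed or failed)
	 * Other completions or failures move the PDEV to ERROR; a
	 * still-active communication leaves the PDEV state unchanged.
	 */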
	switch (pd->dev_comm_state) {
	case DEV_COMM_ERROR:
		if (pd->rmi_state == RMI_PDEV_STATE_STOPPING) {
			pd->rmi_state = RMI_PDEV_STATE_STOPPED;
		} else {
			pd->rmi_state = RMI_PDEV_STATE_ERROR;
		}
		break;
	case DEV_COMM_IDLE:
		if (pd->rmi_state == RMI_PDEV_STATE_NEW) {
			pd->rmi_state = RMI_PDEV_STATE_NEEDS_KEY;
		} else if (pd->rmi_state == RMI_PDEV_STATE_HAS_KEY) {
			pd->rmi_state = RMI_PDEV_STATE_READY;
		} else if (pd->rmi_state == RMI_PDEV_STATE_STOPPING) {
			pd->rmi_state = RMI_PDEV_STATE_STOPPED;
		} else {
			pd->rmi_state = RMI_PDEV_STATE_ERROR;
		}
		break;
	case DEV_COMM_ACTIVE:
		/* No state change required */
		break;
	case DEV_COMM_PENDING:
	default:
		assert(false);
	}

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return rmi_rc;
}

/*
 * smc_pdev_get_state
 *
 * Get state of a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * res - SMC result
 */
void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res)
{
	struct granule *g_pdev;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		res->x[0] = SMC_NOT_SUPPORTED;
		return;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		goto out_err_input;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		goto out_err_input;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		goto out_err_input;
	}

	assert(pd->g_pdev == g_pdev);
	assert(pd->rmi_state <= RMI_PDEV_STATE_ERROR);
	res->x[0] = RMI_SUCCESS;
	res->x[1] = pd->rmi_state;

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return;

out_err_input:
	res->x[0] = RMI_ERROR_INPUT;
}

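/*
 * Expected public key sizes: an uncompressed ECDSA point is encoded as
 * 0x04 || X || Y, i.e. (1 + 2 * 32) = 65 bytes for P-256 and
 * (1 + 2 * 48) = 97 bytes for P-384; an RSASSA-3072 modulus is 384 bytes.
 */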
static unsigned long get_expected_key_size(unsigned long rmi_key_algo)
{
	switch (rmi_key_algo) {
	case RMI_SIGNATURE_ALGORITHM_ECDSA_P256:
		return 65;
	case RMI_SIGNATURE_ALGORITHM_ECDSA_P384:
		return 97;
	case RMI_SIGNATURE_ALGORITHM_RSASSA_3072:
		return 384;
	default:
		return 0;
	}
}

static bool public_key_sig_algo_valid(unsigned long rmi_key_algo)
{
	return (rmi_key_algo == RMI_SIGNATURE_ALGORITHM_ECDSA_P256) ||
	       (rmi_key_algo == RMI_SIGNATURE_ALGORITHM_ECDSA_P384) ||
	       (rmi_key_algo == RMI_SIGNATURE_ALGORITHM_RSASSA_3072);
}

/*
 * Provide the public key associated with a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * pubkey_params_ptr - PA of the pubkey parameters
 */
unsigned long smc_pdev_set_pubkey(unsigned long pdev_ptr,
				  unsigned long pubkey_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pubkey_params;
	void *aux_mapped_addr;
	bool ns_access_ok;
	struct pdev *pd;
	struct rmi_public_key_params pubkey_params;
	unsigned long dev_assign_public_key_sig_algo;
	unsigned long dev_assign_public_key_expected_size;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) || !GRANULE_ALIGNED(pubkey_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy public key parameters */
	g_pubkey_params = find_granule(pubkey_params_ptr);
	if ((g_pubkey_params == NULL) ||
	    (granule_unlocked_state(g_pubkey_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pubkey_params, 0U,
				      sizeof(struct rmi_public_key_params),
				      &pubkey_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/* Check key_len and metadata_len against their maximum values */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pubkey_params.key_len > PUBKEY_PARAM_KEY_LEN_MAX) ||
	    (pubkey_params.metadata_len > PUBKEY_PARAM_METADATA_LEN_MAX)) {
		return RMI_ERROR_INPUT;
	}

	/* coverity[uninit_use:SUPPRESS] */
	dev_assign_public_key_sig_algo = pubkey_params.algo;
	if (!public_key_sig_algo_valid(dev_assign_public_key_sig_algo)) {
		return RMI_ERROR_INPUT;
	}

	/* Validate 'key_len' against the expected key length for the algorithm */
	dev_assign_public_key_expected_size =
		get_expected_key_size(dev_assign_public_key_sig_algo);
	assert(dev_assign_public_key_expected_size != 0U);
	if (pubkey_params.key_len != dev_assign_public_key_expected_size) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	assert(pd->g_pdev == g_pdev);

	if (pd->rmi_state != RMI_PDEV_STATE_NEEDS_KEY) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	/* Map PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	rc = dev_assign_set_public_key(&pd->da_app_data, &pubkey_params);
	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		pd->dev_comm_state = DEV_COMM_PENDING;
		pd->rmi_state = RMI_PDEV_STATE_HAS_KEY;
		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_DEVICE;
	}

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

out_pdev_buf_unmap:
	buffer_unmap(pd);

	granule_unlock(g_pdev);

	return smc_rc;
}

/*
 * Stop the PDEV. This sets the PDEV state to STOPPING, and a subsequent PDEV
 * communicate call can do device-specific cleanup like terminating a secure
 * session.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_stop(unsigned long pdev_ptr)
{
	struct granule *g_pdev;
	unsigned long smc_rc;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

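	/*
	 * A stop request is rejected while a device communication is in
	 * progress, if a stop was already requested or has completed, or
	 * while VDEVs are still associated with this PDEV.
	 */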
	if ((pd->rmi_state == RMI_PDEV_STATE_COMMUNICATING) ||
	    (pd->rmi_state == RMI_PDEV_STATE_STOPPING) ||
	    (pd->rmi_state == RMI_PDEV_STATE_STOPPED)) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	if (pd->num_vdevs != 0U) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	pd->rmi_state = RMI_PDEV_STATE_STOPPING;
	pd->dev_comm_state = DEV_COMM_PENDING;
	smc_rc = RMI_SUCCESS;

out_pdev_buf_unmap:
	buffer_unmap(pd);

	granule_unlock(g_pdev);

	return smc_rc;
}

/*
 * Abort device communication associated with a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_abort(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	unsigned long smc_rc;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if ((pd->rmi_state != RMI_PDEV_STATE_NEW) &&
	    (pd->rmi_state != RMI_PDEV_STATE_HAS_KEY) &&
	    (pd->rmi_state != RMI_PDEV_STATE_COMMUNICATING)) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	/*
	 * If there is no active device communication, then mapping aux
	 * granules and cancelling an existing communication is not required.
	 */
	if (pd->dev_comm_state != DEV_COMM_ACTIVE) {
		goto pdev_reset_state;
	}

	/* Map PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/*
	 * This resumes the blocked CMA SPDM command; the recv callback
	 * handler will return an error and abort the command.
	 */
	rc = dev_assign_abort_app_operation(&pd->da_app_data);
	assert(rc == 0);

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

pdev_reset_state:
	/*
	 * As the device communication is aborted, if the PDEV is in the
	 * COMMUNICATING state then set the state to READY.
	 *
	 * For other PDEV states, transition the comm_state to PENDING,
	 * indicating that RMM has a device request which is ready to be sent
	 * to the device.
	 */
	if (pd->rmi_state == RMI_PDEV_STATE_COMMUNICATING) {
		pd->rmi_state = RMI_PDEV_STATE_READY;
		pd->dev_comm_state = DEV_COMM_IDLE;
	} else {
		pd->dev_comm_state = DEV_COMM_PENDING;
	}
	smc_rc = RMI_SUCCESS;

out_pdev_buf_unmap:
	buffer_unmap(pd);

	granule_unlock(g_pdev);

	return smc_rc;
}

/*
 * Destroy a PDEV. The Host can reclaim PDEV resources using RMI PDEV_DESTROY
 * when the PDEV state is STOPPED.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_destroy(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if (pd->rmi_state != RMI_PDEV_STATE_STOPPED) {
		buffer_unmap(pd);
		granule_unlock(g_pdev);
		return RMI_ERROR_DEVICE;
	}

	/* Map PDEV aux granules and map PDEV heap */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/* Deinit the DSM context state */
	rc = (int)app_run(&pd->da_app_data, DEVICE_ASSIGN_APP_FUNC_ID_DEINIT,
			  0, 0, 0, 0);
	assert(rc == DEV_ASSIGN_STATUS_SUCCESS);

	/* Unmap all PDEV aux granules and heap */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	/*
	 * Scrub the PDEV AUX granules and move their state from PDEV_AUX to
	 * delegated.
	 */
	pdev_restore_aux_granules_state(pd->g_aux, pd->num_aux, true);

	/* Move the PDEV granule from PDEV to delegated state */
	granule_memzero_mapped(pd);
	buffer_unmap(pd);

	granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);

	return RMI_SUCCESS;
}