/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <app.h>
#include <arch.h>
#include <arch_features.h>
#include <buffer.h>
#include <debug.h>
#include <dev.h>
#include <dev_assign_app.h>
#include <feature.h>
#include <granule.h>
#include <sizes.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <string.h>
#include <utils_def.h>

/*
 * This function will only be invoked when the PDEV create fails or when the
 * PDEV is being destroyed. Hence the PDEV will not be in use when this
 * function is called and therefore no lock is acquired before its invocation.
 */
static void pdev_restore_aux_granules_state(struct granule *pdev_aux[],
					    unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_pdev_aux = pdev_aux[i];

		granule_lock(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		if (scrub) {
			buffer_granule_memzero(g_pdev_aux,
				(enum buffer_slot)((unsigned int)SLOT_PDEV_AUX0 + i));
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_DELEGATED);
	}
}

/*
 * todo:
 * Validate device-specific PDEV parameters by traversing all previously
 * created PDEVs and checking them against the current PDEV parameters. This
 * implements RmiPdevParamsIsValid of the RMM specification.
 */
static int validate_rmi_pdev_params(struct rmi_pdev_params *pd_params)
{
	(void)pd_params;

	/*
	 * Check that the device identifier, Root Port identifier, IDE stream
	 * identifier and RID range are valid.
	 */

	/*
	 * Check that the device identifier is not equal to the device
	 * identifier of another PDEV.
	 */

	/*
	 * Check that the RID range does not overlap the RID range of another
	 * PDEV.
	 */

	/*
	 * Check that every address range falls within an MMIO range permitted
	 * by the system.
	 */

	/*
	 * Check that none of the address ranges overlaps another address range
	 * for this PDEV.
	 */

	return 0;
}
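
/*
 * For illustration only, the RID-range overlap check above could take roughly
 * this shape, assuming a hypothetical for_each_pdev() iterator over
 * previously created PDEVs (no such helper exists in this file):
 *
 *	for_each_pdev(other) {
 *		if ((pd_params->rid_base <= other->dev.rid_top) &&
 *		    (pd_params->rid_top >= other->dev.rid_base)) {
 *			return -1;	(RID ranges overlap)
 *		}
 *	}
 */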

static unsigned long pdev_get_aux_count_from_flags(unsigned long pdev_flags)
{
	unsigned long aux_count;

	(void)pdev_flags;

	/*
	 * The current implementation requires that RMI_PDEV_SPDM_TRUE
	 * is set in the flags.
	 */
	assert(EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_flags) == RMI_PDEV_SPDM_TRUE);

	/*
	 * Currently, the number of pages required to instantiate an app is
	 * hardcoded in the app header. In this implementation, aux_count
	 * does not depend on the flags set in pdev_flags. The worst case
	 * (i.e., the most granules) is assumed.
	 */
	aux_count = app_get_required_granule_count(RMM_DEV_ASSIGN_APP_ID);
	assert(aux_count <= PDEV_PARAM_AUX_GRANULES_MAX);

	return aux_count;
}

/*
 * smc_pdev_aux_count
 *
 * Get the number of auxiliary granules required for a PDEV.
 *
 * flags - PDEV flags
 * res   - SMC result
 */
void smc_pdev_aux_count(unsigned long flags, struct smc_result *res)
{
	if (is_rmi_feat_da_enabled()) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = pdev_get_aux_count_from_flags(flags);
	} else {
		res->x[0] = SMC_NOT_SUPPORTED;
	}
}
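
/*
 * For reference, a typical Host-side call sequence around the handlers in
 * this file (an illustrative sketch derived from the code below, not a
 * normative flow from the RMM specification):
 *
 *	RMI_PDEV_AUX_COUNT(flags)          num_aux returned in res.x[1]
 *	delegate num_aux granules plus one granule for the PDEV itself
 *	RMI_PDEV_CREATE(pdev, params)      PDEV state: NEW
 *	RMI_PDEV_COMMUNICATE(pdev, data)   repeat while communication is active
 *	...                                state: NEEDS_KEY once idle
 *	RMI_PDEV_STOP(pdev)                state: STOPPING
 *	RMI_PDEV_COMMUNICATE(pdev, data)   state: STOPPED when done
 *	RMI_PDEV_DESTROY(pdev)             granules return to DELEGATED
 */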

/*
 * smc_pdev_create
 *
 * pdev_ptr        - PA of the PDEV
 * pdev_params_ptr - PA of PDEV parameters
 */
unsigned long smc_pdev_create(unsigned long pdev_ptr,
			      unsigned long pdev_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pdev_params;
	struct pdev *pd;
	struct rmi_pdev_params pdev_params; /* this consumes 4k of stack */
	struct granule *pdev_aux_granules[PDEV_PARAM_AUX_GRANULES_MAX];
	unsigned long num_aux_req;
	bool ns_access_ok;
	void *aux_mapped_addr;
	struct dev_assign_params dparams;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) ||
	    !GRANULE_ALIGNED(pdev_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy PDEV parameters */
	g_pdev_params = find_granule(pdev_params_ptr);
	if ((g_pdev_params == NULL) ||
	    (granule_unlocked_state(g_pdev_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pdev_params, 0U,
				      sizeof(struct rmi_pdev_params),
				      &pdev_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Validate RmiPdevFlags. RMM supports PCIe off-chip devices,
	 * represented by flags: SPDM=true, IDE=true, COHERENT=false,
	 * P2P=false.
	 */
	/* coverity[uninit_use:SUPPRESS] */
	if ((EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_params.flags) !=
	     RMI_PDEV_SPDM_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) !=
	     RMI_PDEV_IDE_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_COHERENT, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE) ||
	    /*
	     * Note: the P2P field is compared against RMI_PDEV_COHERENT_FALSE
	     * here, assuming both "false" values share the same encoding.
	     */
	    (EXTRACT(RMI_PDEV_FLAGS_P2P, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE)) {
		return RMI_ERROR_NOT_SUPPORTED;
	}

	/* Validate the num_aux PDEV parameter */
	num_aux_req = pdev_get_aux_count_from_flags(pdev_params.flags);
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.num_aux == 0U) ||
	    (pdev_params.num_aux != num_aux_req)) {
		ERROR("ERROR: PDEV needs %lu aux granules, host allocated %lu.\n",
		      num_aux_req, pdev_params.num_aux);
		return RMI_ERROR_INPUT;
	}

	/* Validate the ncoh_num_addr_range PDEV parameter */
	/* coverity[uninit_use:SUPPRESS] */
	if (pdev_params.ncoh_num_addr_range > PDEV_PARAM_NCOH_ADDR_RANGE_MAX) {
		return RMI_ERROR_INPUT;
	}

	/* Validate the hash algorithm */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.hash_algo != RMI_HASH_SHA_256) &&
	    (pdev_params.hash_algo != RMI_HASH_SHA_512)) {
		return RMI_ERROR_INPUT;
	}

	/* cppcheck-suppress knownConditionTrueFalse */
	if (validate_rmi_pdev_params(&pdev_params) != 0) {
		return RMI_ERROR_INPUT;
	}

	/* Loop through pdev_aux_granules and transition them */
	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		struct granule *g_pdev_aux;

		/* coverity[uninit_use_in_call:SUPPRESS] */
		g_pdev_aux = find_lock_granule(pdev_params.aux[i],
					       GRANULE_STATE_DELEGATED);
		if (g_pdev_aux == NULL) {
			pdev_restore_aux_granules_state(pdev_aux_granules, i,
							false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		pdev_aux_granules[i] = g_pdev_aux;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_DELEGATED);
	if (g_pdev == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_restore_pdev_aux_granule_state;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
		goto out_restore_pdev_aux_granule_state;
	}

	/* Map all PDEV aux granules to slots starting at SLOT_PDEV_AUX0 */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pdev_aux_granules,
						       (unsigned int)pdev_params.num_aux);
	if (aux_mapped_addr == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_unmap_pdev_slot_buffer;
	}

	/* Call the init routine to initialize device class specific state */
	dparams.dev_handle = (void *)pd;
	dparams.rmi_hash_algo = pdev_params.hash_algo;
	dparams.cert_slot_id = (uint8_t)pdev_params.cert_id;

	if (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) ==
	    RMI_PDEV_IDE_TRUE) {
		dparams.has_ide = true;
		dparams.ecam_addr = pdev_params.ecam_addr;
		dparams.rp_id = pdev_params.root_id;
		dparams.ide_sid = pdev_params.ide_sid;
	} else {
		dparams.has_ide = false;
	}

	/* Use the PDEV aux pages for the DA app */
	uintptr_t granule_pas[PDEV_PARAM_AUX_GRANULES_MAX];

	for (unsigned int i = 0U; i < pdev_params.num_aux; ++i) {
		granule_pas[i] = granule_addr(pdev_aux_granules[i]);
	}

	rc = dev_assign_app_init(&pd->da_app_data,
				 granule_pas,
				 pdev_params.num_aux,
				 aux_mapped_addr, &dparams);

	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		/* Initialize the PDEV */
		pd->g_pdev = g_pdev;
		pd->rmi_state = RMI_PDEV_STATE_NEW;
		pd->rmi_flags = pdev_params.flags;
		pd->num_vdevs = 0;
		pd->rmi_hash_algo = pdev_params.hash_algo;
		pd->num_aux = (unsigned int)pdev_params.num_aux;
		(void)memcpy((void *)pd->g_aux, (void *)pdev_aux_granules,
			     pdev_params.num_aux * sizeof(struct granule *));

		/* Initialize PDEV communication state */
		pd->dev_comm_state = DEV_COMM_PENDING;

		/* Initialize the PDEV PCIe device */
		pd->dev.bdf = pdev_params.pdev_id;
		pd->dev.segment_id = pdev_params.segment_id;
		pd->dev.ecam_addr = pdev_params.ecam_addr;
		pd->dev.root_id = pdev_params.root_id;
		pd->dev.cert_slot_id = pdev_params.cert_id;
		pd->dev.ide_sid = pdev_params.ide_sid;
		pd->dev.rid_base = pdev_params.rid_base;
		pd->dev.rid_top = pdev_params.rid_top;
		pd->dev.ncoh_num_addr_range = pdev_params.ncoh_num_addr_range;
		(void)memcpy(&pd->dev.ncoh_addr_range,
			     &pdev_params.ncoh_addr_range,
			     (sizeof(struct rmi_address_range) *
			      pdev_params.ncoh_num_addr_range));

		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_INPUT;
	}

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, (unsigned int)pdev_params.num_aux);

out_unmap_pdev_slot_buffer:
	/* Unmap the PDEV buffer from slot SLOT_PDEV */
	buffer_unmap(pd);

	/*
	 * On success, unlock the PDEV granule and transition it to
	 * GRANULE_STATE_PDEV; otherwise unlock it and retain its state at
	 * GRANULE_STATE_DELEGATED.
	 */
	if (smc_rc == RMI_SUCCESS) {
		granule_unlock_transition(g_pdev, GRANULE_STATE_PDEV);
	} else {
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
	}

out_restore_pdev_aux_granule_state:
	if (smc_rc != RMI_SUCCESS) {
		/*
		 * Transition all PDEV aux granule states back to
		 * GRANULE_STATE_DELEGATED.
		 */
		pdev_restore_aux_granules_state(pdev_aux_granules,
						(unsigned int)pdev_params.num_aux,
						false);
	}

	return smc_rc;
}
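
/*
 * For reference, the granule state transitions driven by smc_pdev_create
 * (a summary of the code above, not an addition to it):
 *
 *	pdev_ptr:   DELEGATED -> PDEV      on success
 *	aux[0..n):  DELEGATED -> PDEV_AUX  on success
 *
 * On any failure path, every granule that was transitioned is returned to
 * DELEGATED before the error code is reported to the Host.
 */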

/* Validate the RmiDevCommData.RmiDevCommEnter argument passed by the Host */
static unsigned long copyin_and_validate_dev_comm_enter(
	unsigned long dev_comm_data_ptr,
	struct rmi_dev_comm_enter *enter_args,
	unsigned int dev_comm_state)
{
	struct granule *g_dev_comm_data;
	struct granule *g_buf;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_dev_comm_data,
				      RMI_DEV_COMM_ENTER_OFFSET,
				      sizeof(struct rmi_dev_comm_enter),
				      enter_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	if (!GRANULE_ALIGNED(enter_args->req_addr) ||
	    !GRANULE_ALIGNED(enter_args->resp_addr) ||
	    (enter_args->resp_len > GRANULE_SIZE)) {
		return RMI_ERROR_INPUT;
	}

	if ((enter_args->status == RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	    (enter_args->resp_len == 0U)) {
		return RMI_ERROR_INPUT;
	}

	/* Check that the request and response buffers are in NS PAS */
	g_buf = find_granule(enter_args->req_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	g_buf = find_granule(enter_args->resp_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	if ((dev_comm_state == DEV_COMM_ACTIVE) &&
	    ((enter_args->status != RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	     (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_ERROR))) {
		return RMI_ERROR_DEVICE;
	}

	if ((dev_comm_state == DEV_COMM_PENDING) &&
	    (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_NONE)) {
		return RMI_ERROR_DEVICE;
	}

	return RMI_SUCCESS;
}
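
/*
 * For reference, the enter status values accepted for each communication
 * state in the checks above (anything else yields RMI_ERROR_DEVICE):
 *
 *	DEV_COMM_PENDING:  NONE
 *	DEV_COMM_ACTIVE:   RESPONSE or ERROR
 */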

/*
 * Copy out DevCommExitArgs
 */
static unsigned long copyout_dev_comm_exit(unsigned long dev_comm_data_ptr,
					   struct rmi_dev_comm_exit *exit_args)
{
	struct granule *g_dev_comm_data;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_write(SLOT_NS, g_dev_comm_data,
				       RMI_DEV_COMM_EXIT_OFFSET,
				       sizeof(struct rmi_dev_comm_exit),
				       exit_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	return RMI_SUCCESS;
}

static int pdev_dispatch_cmd(struct pdev *pd, struct rmi_dev_comm_enter *enter_args,
			     struct rmi_dev_comm_exit *exit_args)
{
	int rc;
	struct dev_obj_digest *comm_digest_ptr;

	if (pd->rmi_state == RMI_PDEV_STATE_NEW) {
		comm_digest_ptr = &pd->cert_digest;
	} else {
		comm_digest_ptr = NULL;
	}

	if (pd->dev_comm_state == DEV_COMM_ACTIVE) {
		return dev_assign_dev_communicate(&pd->da_app_data, enter_args,
			exit_args, comm_digest_ptr, DEVICE_ASSIGN_APP_FUNC_ID_RESUME);
	}

	switch (pd->rmi_state) {
	case RMI_PDEV_STATE_NEW:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
			exit_args, comm_digest_ptr, DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT);
		break;
	case RMI_PDEV_STATE_STOPPING:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
			exit_args, comm_digest_ptr, DEVICE_ASSIGN_APP_FUNC_ID_STOP_CONNECTION);
		break;
	default:
		assert(false);
		rc = -1;
	}

	return rc;
}
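
/*
 * For reference, the dispatch above maps PDEV state to the device assignment
 * app function invoked (RESUME takes precedence whenever a communication is
 * already active):
 *
 *	DEV_COMM_ACTIVE:          DEVICE_ASSIGN_APP_FUNC_ID_RESUME
 *	RMI_PDEV_STATE_NEW:       DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT
 *	RMI_PDEV_STATE_STOPPING:  DEVICE_ASSIGN_APP_FUNC_ID_STOP_CONNECTION
 */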

static unsigned long dev_communicate(struct pdev *pd,
				     unsigned long dev_comm_data_ptr)
{
	struct rmi_dev_comm_enter enter_args;
	struct rmi_dev_comm_exit exit_args;
	void *aux_mapped_addr;
	unsigned long comm_rc;
	int rc;

	assert(pd != NULL);

	if ((pd->dev_comm_state == DEV_COMM_IDLE) ||
	    (pd->dev_comm_state == DEV_COMM_ERROR)) {
		return RMI_ERROR_DEVICE;
	}

	/* Validate the RmiDevCommEnter arguments in DevCommData */
	/* coverity[uninit_use_in_call:SUPPRESS] */
	comm_rc = copyin_and_validate_dev_comm_enter(dev_comm_data_ptr,
						     &enter_args,
						     pd->dev_comm_state);
	if (comm_rc != RMI_SUCCESS) {
		return comm_rc;
	}

	/* Map the PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	rc = pdev_dispatch_cmd(pd, &enter_args, &exit_args);

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	comm_rc = copyout_dev_comm_exit(dev_comm_data_ptr, &exit_args);
	if (comm_rc != RMI_SUCCESS) {
		/* todo: device status is updated but copyout data failed? */
		return RMI_ERROR_INPUT;
	}

	/*
	 * Update the PDEV communication state based on the result of the
	 * device communication.
	 */
	switch (rc) {
	case DEV_ASSIGN_STATUS_COMM_BLOCKED:
		pd->dev_comm_state = DEV_COMM_ACTIVE;
		break;
	case DEV_ASSIGN_STATUS_ERROR:
		pd->dev_comm_state = DEV_COMM_ERROR;
		break;
	case DEV_ASSIGN_STATUS_SUCCESS:
		pd->dev_comm_state = DEV_COMM_IDLE;
		break;
	default:
		assert(false);
	}

	return RMI_SUCCESS;
}

/*
 * smc_pdev_communicate
 *
 * pdev_ptr          - PA of the PDEV
 * dev_comm_data_ptr - PA of the communication data structure
 */
unsigned long smc_pdev_communicate(unsigned long pdev_ptr,
				   unsigned long dev_comm_data_ptr)
{
	struct granule *g_pdev;
	struct pdev *pd;
	unsigned long rmi_rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) || !GRANULE_ALIGNED(dev_comm_data_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	assert(pd->g_pdev == g_pdev);

	rmi_rc = dev_communicate(pd, dev_comm_data_ptr);

	/*
	 * Update the PDEV state based on the resulting device communication
	 * state.
	 */
	switch (pd->dev_comm_state) {
	case DEV_COMM_ERROR:
		if (pd->rmi_state == RMI_PDEV_STATE_STOPPING) {
			pd->rmi_state = RMI_PDEV_STATE_STOPPED;
		} else {
			pd->rmi_state = RMI_PDEV_STATE_ERROR;
		}
		break;
	case DEV_COMM_IDLE:
		if (pd->rmi_state == RMI_PDEV_STATE_NEW) {
			pd->rmi_state = RMI_PDEV_STATE_NEEDS_KEY;
		} else if (pd->rmi_state == RMI_PDEV_STATE_STOPPING) {
			pd->rmi_state = RMI_PDEV_STATE_STOPPED;
		} else {
			pd->rmi_state = RMI_PDEV_STATE_ERROR;
		}
		break;
	case DEV_COMM_ACTIVE:
		/* No state change required */
		break;
	case DEV_COMM_PENDING:
	default:
		assert(false);
	}

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return rmi_rc;
}
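
/*
 * For reference, the PDEV state updates performed above, keyed by the
 * communication state left behind by dev_communicate():
 *
 *	DEV_COMM_ERROR:   STOPPING -> STOPPED, otherwise -> ERROR
 *	DEV_COMM_IDLE:    NEW -> NEEDS_KEY, STOPPING -> STOPPED,
 *	                  otherwise -> ERROR
 *	DEV_COMM_ACTIVE:  no change (the Host is expected to call
 *	                  communicate again)
 */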

/*
 * smc_pdev_get_state
 *
 * Get the state of a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * res      - SMC result
 */
void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res)
{
	struct granule *g_pdev;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		res->x[0] = SMC_NOT_SUPPORTED;
		return;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		goto out_err_input;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		goto out_err_input;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		goto out_err_input;
	}

	assert(pd->g_pdev == g_pdev);
	assert(pd->rmi_state <= RMI_PDEV_STATE_ERROR);
	res->x[0] = RMI_SUCCESS;
	res->x[1] = pd->rmi_state;

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return;

out_err_input:
	res->x[0] = RMI_ERROR_INPUT;
}

/*
 * Stop the PDEV. This sets the PDEV state to STOPPING, after which a PDEV
 * communicate call can do device-specific cleanup, such as terminating a
 * secure session.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_stop(unsigned long pdev_ptr)
{
	struct granule *g_pdev;
	unsigned long smc_rc;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if ((pd->rmi_state == RMI_PDEV_STATE_COMMUNICATING) ||
	    (pd->rmi_state == RMI_PDEV_STATE_STOPPING) ||
	    (pd->rmi_state == RMI_PDEV_STATE_STOPPED)) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	if (pd->num_vdevs != 0U) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	pd->rmi_state = RMI_PDEV_STATE_STOPPING;
	pd->dev_comm_state = DEV_COMM_PENDING;
	smc_rc = RMI_SUCCESS;

out_pdev_buf_unmap:
	buffer_unmap(pd);

	granule_unlock(g_pdev);

	return smc_rc;
}

/*
 * Abort device communication associated with a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_abort(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	unsigned long smc_rc;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if ((pd->rmi_state != RMI_PDEV_STATE_NEW) &&
	    (pd->rmi_state != RMI_PDEV_STATE_HAS_KEY) &&
	    (pd->rmi_state != RMI_PDEV_STATE_COMMUNICATING)) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	/*
	 * If there is no active device communication, then mapping aux
	 * granules and cancelling an existing communication is not required.
	 */
	if (pd->dev_comm_state != DEV_COMM_ACTIVE) {
		goto pdev_reset_state;
	}

	/* Map the PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/*
	 * This resumes the blocked CMA SPDM command; the recv callback
	 * handler then returns an error and aborts the command.
	 */
	rc = dev_assign_abort_app_operation(&pd->da_app_data);
	assert(rc == 0);

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

pdev_reset_state:
	/*
	 * As the device communication has been aborted, if the PDEV is in the
	 * COMMUNICATING state, set its state back to READY.
	 *
	 * For other PDEV states, transition comm_state to PENDING, indicating
	 * that RMM has a device request which is ready to be sent to the
	 * device.
	 */
	if (pd->rmi_state == RMI_PDEV_STATE_COMMUNICATING) {
		pd->rmi_state = RMI_PDEV_STATE_READY;
		pd->dev_comm_state = DEV_COMM_IDLE;
	} else {
		pd->dev_comm_state = DEV_COMM_PENDING;
	}
	smc_rc = RMI_SUCCESS;

out_pdev_buf_unmap:
	buffer_unmap(pd);

	granule_unlock(g_pdev);

	return smc_rc;
}

/*
 * Destroy a PDEV. The Host can reclaim PDEV resources using RMI_PDEV_DESTROY
 * once the PDEV state is STOPPED.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_destroy(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if (pd->rmi_state != RMI_PDEV_STATE_STOPPED) {
		buffer_unmap(pd);
		granule_unlock(g_pdev);
		return RMI_ERROR_DEVICE;
	}

	/* Map the PDEV aux granules and the PDEV heap */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/* Deinit the DSM context state */
	rc = (int)app_run(&pd->da_app_data, DEVICE_ASSIGN_APP_FUNC_ID_DEINIT,
			  0, 0, 0, 0);
	assert(rc == DEV_ASSIGN_STATUS_SUCCESS);

	/* Unmap all PDEV aux granules and the heap */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	/*
	 * Scrub the PDEV aux granules and transition their state from
	 * PDEV_AUX to DELEGATED.
	 */
	pdev_restore_aux_granules_state(pd->g_aux, pd->num_aux, true);

	/* Transition the PDEV granule from PDEV to DELEGATED state */
	granule_memzero_mapped(pd);
	buffer_unmap(pd);

	granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);

	return RMI_SUCCESS;
}