/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <app.h>
#include <arch.h>
#include <arch_features.h>
#include <buffer.h>
#include <debug.h>
#include <dev.h>
#include <dev_assign_app.h>
#include <feature.h>
#include <granule.h>
#include <sizes.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <string.h>
#include <utils_def.h>

/*
 * This function will only be invoked when the PDEV create fails or when the
 * PDEV is being destroyed. Hence the PDEV will not be in use when this
 * function is called and therefore no lock is acquired before its invocation.
 */
static void pdev_restore_aux_granules_state(struct granule *pdev_aux[],
					    unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_pdev_aux = pdev_aux[i];

		granule_lock(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		if (scrub) {
			buffer_granule_memzero(g_pdev_aux,
				(enum buffer_slot)((unsigned int)SLOT_PDEV_AUX0 + i));
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_DELEGATED);
	}
}

/*
 * TODO:
 * Validate device-specific PDEV parameters by traversing all previously
 * created PDEVs and checking them against the current PDEV parameters. This
 * implements RmiPdevParamsIsValid of the RMM specification.
 */
static int validate_rmi_pdev_params(struct rmi_pdev_params *pd_params)
{
	(void)pd_params;
	/*
	 * Check if the device identifier, Root Port identifier, IDE stream
	 * identifier and RID range are valid.
	 */

	/*
	 * Check that the device identifier is not equal to the device
	 * identifier of another PDEV.
	 */

	/* Check that the RID range does not overlap the RID range of another PDEV */

	/*
	 * Check that every address range falls within an MMIO range permitted
	 * by the system.
	 */

	/*
	 * Check that none of the address ranges overlaps another address range
	 * for this PDEV.
	 */

	return 0;
}

static unsigned long pdev_get_aux_count_from_flags(unsigned long pdev_flags)
{
	unsigned long aux_count;

	(void)pdev_flags;

	/*
	 * The current implementation requires that RMI_PDEV_SPDM_TRUE
	 * is set in the flags.
	 */
	assert(EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_flags) == RMI_PDEV_SPDM_TRUE);

	/*
	 * Currently, the number of pages required to instantiate an app is
	 * hardcoded in the app header. In this implementation, aux_count
	 * does not depend on the flags set in pdev_flags. The worst case
	 * (i.e., the most granules) is assumed.
	 */
	aux_count = app_get_required_granule_count(RMM_DEV_ASSIGN_APP_ID);
	assert(aux_count <= PDEV_PARAM_AUX_GRANULES_MAX);

	return aux_count;
}

/*
 * smc_pdev_aux_count
 *
 * Get the number of auxiliary granules required for a PDEV.
 *
 * flags - PDEV flags
 * res - SMC result
 */
void smc_pdev_aux_count(unsigned long flags, struct smc_result *res)
{
	if (is_rmi_feat_da_enabled()) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = pdev_get_aux_count_from_flags(flags);
	} else {
		res->x[0] = SMC_NOT_SUPPORTED;
	}
}
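
/*
 * Illustrative host-side flow for creating a PDEV (informal sketch; the
 * rmi_* wrappers below are hypothetical host functions, not part of RMM):
 *
 *	rmi_pdev_aux_count(flags, &res);	// this handler
 *	num_aux = res.x[1];
 *	for (i = 0; i < num_aux; i++) {
 *		rmi_granule_delegate(aux_pa[i]);	// aux granules to DELEGATED
 *	}
 *	rmi_granule_delegate(pdev_pa);		// PDEV granule to DELEGATED
 *	rmi_pdev_create(pdev_pa, params_pa);	// see smc_pdev_create() below
 */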

/*
 * smc_pdev_create
 *
 * pdev_ptr - PA of the PDEV
 * pdev_params_ptr - PA of PDEV parameters
 */
unsigned long smc_pdev_create(unsigned long pdev_ptr,
			      unsigned long pdev_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pdev_params;
	struct pdev *pd;
	struct rmi_pdev_params pdev_params; /* this consumes 4k of stack */
	struct granule *pdev_aux_granules[PDEV_PARAM_AUX_GRANULES_MAX];
	unsigned long num_aux_req;
	bool ns_access_ok;
	void *aux_mapped_addr;
	struct dev_assign_params dparams;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) ||
	    !GRANULE_ALIGNED(pdev_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy PDEV parameters */
	g_pdev_params = find_granule(pdev_params_ptr);
	if ((g_pdev_params == NULL) ||
	    (granule_unlocked_state(g_pdev_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pdev_params, 0U,
				      sizeof(struct rmi_pdev_params),
				      &pdev_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Validate RmiPdevFlags. RMM supports a PCIe off-chip device
	 * represented by the flags SPDM=true, IDE=true, COHERENT=false,
	 * P2P=false.
	 */
	/* coverity[uninit_use:SUPPRESS] */
	if ((EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_params.flags) !=
	     RMI_PDEV_SPDM_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) !=
	     RMI_PDEV_IDE_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_COHERENT, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_P2P, pdev_params.flags) !=
	     RMI_PDEV_P2P_FALSE)) {
		return RMI_ERROR_NOT_SUPPORTED;
	}

	/* Validate PDEV parameter num_aux */
	num_aux_req = pdev_get_aux_count_from_flags(pdev_params.flags);
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.num_aux == 0U) ||
	    (pdev_params.num_aux != num_aux_req)) {
		ERROR("ERROR: PDEV needs %lu aux granules, host allocated %lu.\n",
		      num_aux_req, pdev_params.num_aux);
		return RMI_ERROR_INPUT;
	}

	/* Validate PDEV parameter ncoh_num_addr_range */
	/* coverity[uninit_use:SUPPRESS] */
	if (pdev_params.ncoh_num_addr_range > PDEV_PARAM_NCOH_ADDR_RANGE_MAX) {
		return RMI_ERROR_INPUT;
	}

	/* Validate hash algorithm */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.hash_algo != RMI_HASH_SHA_256) &&
	    (pdev_params.hash_algo != RMI_HASH_SHA_512)) {
		return RMI_ERROR_INPUT;
	}

	/* cppcheck-suppress knownConditionTrueFalse */
	if (validate_rmi_pdev_params(&pdev_params) != 0) {
		return RMI_ERROR_INPUT;
	}

	/* Loop through pdev_aux_granules and transition them */
	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		struct granule *g_pdev_aux;

		/* coverity[uninit_use_in_call:SUPPRESS] */
		g_pdev_aux = find_lock_granule(pdev_params.aux[i],
					       GRANULE_STATE_DELEGATED);
		if (g_pdev_aux == NULL) {
			pdev_restore_aux_granules_state(pdev_aux_granules, i,
							false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		pdev_aux_granules[i] = g_pdev_aux;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_DELEGATED);
	if (g_pdev == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_restore_pdev_aux_granule_state;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
		goto out_restore_pdev_aux_granule_state;
	}

	/* Map all PDEV aux granules to slots starting at SLOT_PDEV_AUX0 */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pdev_aux_granules,
						       (unsigned int)pdev_params.num_aux);
	if (aux_mapped_addr == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_unmap_pdev_slot_buffer;
	}

	/* Call init routine to initialize device class specific state */
	dparams.dev_handle = (void *)pd;
	dparams.rmi_hash_algo = pdev_params.hash_algo;
	dparams.cert_slot_id = (uint8_t)pdev_params.cert_id;

	if (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) ==
	    RMI_PDEV_IDE_TRUE) {
		dparams.has_ide = true;
		dparams.ecam_addr = pdev_params.ecam_addr;
		dparams.rp_id = pdev_params.root_id;
		dparams.ide_sid = pdev_params.ide_sid;
	} else {
		dparams.has_ide = false;
	}

	/* Use the PDEV aux pages for the DA app */
	uintptr_t granule_pas[PDEV_PARAM_AUX_GRANULES_MAX];

	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		granule_pas[i] = granule_addr(pdev_aux_granules[i]);
	}

	rc = dev_assign_app_init(&pd->da_app_data,
				 granule_pas,
				 pdev_params.num_aux,
				 aux_mapped_addr, &dparams);

	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		/* Initialize PDEV */
		pd->g_pdev = g_pdev;
		pd->rmi_state = RMI_PDEV_STATE_NEW;
		pd->rmi_flags = pdev_params.flags;
		pd->num_vdevs = 0U;
		pd->rmi_hash_algo = pdev_params.hash_algo;
		pd->num_aux = (unsigned int)pdev_params.num_aux;
		(void)memcpy((void *)pd->g_aux, (void *)pdev_aux_granules,
			     pdev_params.num_aux * sizeof(struct granule *));

		/* Initialize PDEV communication state */
		pd->dev_comm_state = DEV_COMM_PENDING;

		/* Initialize PDEV PCIe device */
		pd->dev.bdf = pdev_params.pdev_id;
		pd->dev.segment_id = pdev_params.segment_id;
		pd->dev.ecam_addr = pdev_params.ecam_addr;
		pd->dev.root_id = pdev_params.root_id;
		pd->dev.cert_slot_id = pdev_params.cert_id;
		pd->dev.ide_sid = pdev_params.ide_sid;
		pd->dev.rid_base = pdev_params.rid_base;
		pd->dev.rid_top = pdev_params.rid_top;
		pd->dev.ncoh_num_addr_range = pdev_params.ncoh_num_addr_range;
		(void)memcpy(&pd->dev.ncoh_addr_range,
			     &pdev_params.ncoh_addr_range,
			     (sizeof(struct rmi_address_range) *
			      pdev_params.ncoh_num_addr_range));

		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_INPUT;
	}

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, (unsigned int)pdev_params.num_aux);

out_unmap_pdev_slot_buffer:
	/* Unmap the PDEV buffer from slot PDEV */
	buffer_unmap(pd);

	/*
	 * On success, unlock the PDEV granule and transition its state to
	 * GRANULE_STATE_PDEV; otherwise unlock it and retain the state
	 * GRANULE_STATE_DELEGATED.
	 */
	if (smc_rc == RMI_SUCCESS) {
		granule_unlock_transition(g_pdev, GRANULE_STATE_PDEV);
	} else {
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
	}

out_restore_pdev_aux_granule_state:
	if (smc_rc != RMI_SUCCESS) {
		/*
		 * Transition all PDEV AUX granule states back to
		 * GRANULE_STATE_DELEGATED
		 */
		pdev_restore_aux_granules_state(pdev_aux_granules,
				(unsigned int)pdev_params.num_aux, false);
	}

	return smc_rc;
}
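
/*
 * Granule state transitions driven by smc_pdev_create() (informative
 * summary of the code above):
 *
 *	aux granules:	DELEGATED -> PDEV_AUX	on entry
 *			PDEV_AUX  -> DELEGATED	rolled back on failure
 *	PDEV granule:	DELEGATED -> PDEV	on success
 *			stays DELEGATED		on failure
 */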
/* Validate the RmiDevCommData.RmiDevCommEnter argument passed by the Host */
static unsigned long copyin_and_validate_dev_comm_enter(
	unsigned long dev_comm_data_ptr,
	struct rmi_dev_comm_enter *enter_args,
	unsigned int dev_comm_state)
{
	struct granule *g_dev_comm_data;
	struct granule *g_buf;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_dev_comm_data,
				      RMI_DEV_COMM_ENTER_OFFSET,
				      sizeof(struct rmi_dev_comm_enter),
				      enter_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	if (!GRANULE_ALIGNED(enter_args->req_addr) ||
	    !GRANULE_ALIGNED(enter_args->resp_addr) ||
	    (enter_args->resp_len > GRANULE_SIZE)) {
		return RMI_ERROR_INPUT;
	}

	if ((enter_args->status == RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	    (enter_args->resp_len == 0U)) {
		return RMI_ERROR_INPUT;
	}

	/* Check if the request and response buffers are in NS PAS */
	g_buf = find_granule(enter_args->req_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	g_buf = find_granule(enter_args->resp_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	if ((dev_comm_state == DEV_COMM_ACTIVE) &&
	    ((enter_args->status != RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	     (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_ERROR))) {
		return RMI_ERROR_DEVICE;
	}

	if ((dev_comm_state == DEV_COMM_PENDING) &&
	    (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_NONE)) {
		return RMI_ERROR_DEVICE;
	}

	return RMI_SUCCESS;
}

/*
 * Copy out DevCommExitArgs to the Host
 */
static unsigned long copyout_dev_comm_exit(unsigned long dev_comm_data_ptr,
					   struct rmi_dev_comm_exit *exit_args)
{
	struct granule *g_dev_comm_data;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_write(SLOT_NS, g_dev_comm_data,
				       RMI_DEV_COMM_EXIT_OFFSET,
				       sizeof(struct rmi_dev_comm_exit),
				       exit_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	return RMI_SUCCESS;
}
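
/*
 * The two helpers above assume the Host lays out RmiDevCommData in a single
 * NS granule, with the enter structure read from RMI_DEV_COMM_ENTER_OFFSET
 * and the exit structure written at RMI_DEV_COMM_EXIT_OFFSET. Conceptually
 * (a sketch only; the actual offsets and layout come from smc-rmi.h):
 *
 *	struct rmi_dev_comm_data {
 *		struct rmi_dev_comm_enter enter; // at RMI_DEV_COMM_ENTER_OFFSET
 *		struct rmi_dev_comm_exit exit;	 // at RMI_DEV_COMM_EXIT_OFFSET
 *	};
 */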

static int pdev_dispatch_cmd(struct pdev *pd, struct rmi_dev_comm_enter *enter_args,
			     struct rmi_dev_comm_exit *exit_args)
{
	int rc;

	if (pd->dev_comm_state == DEV_COMM_ACTIVE) {
		return dev_assign_dev_communicate(&pd->da_app_data, enter_args,
				exit_args, DEVICE_ASSIGN_APP_FUNC_ID_RESUME);
	}

	switch (pd->rmi_state) {
	case RMI_PDEV_STATE_NEW:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
				exit_args, DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT);
		break;
	case RMI_PDEV_STATE_STOPPING:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
				exit_args, DEVICE_ASSIGN_APP_FUNC_ID_STOP_CONNECTION);
		break;
	default:
		assert(false);
		rc = -1;
	}

	return rc;
}
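
/*
 * Dispatch summary for pdev_dispatch_cmd() (informative): an in-progress
 * exchange is always resumed; otherwise the PDEV state selects the
 * dev_assign app command:
 *
 *	DEV_COMM_ACTIVE		-> DEVICE_ASSIGN_APP_FUNC_ID_RESUME
 *	RMI_PDEV_STATE_NEW	-> DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT
 *	RMI_PDEV_STATE_STOPPING	-> DEVICE_ASSIGN_APP_FUNC_ID_STOP_CONNECTION
 */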

static unsigned long dev_communicate(struct pdev *pd,
				     unsigned long dev_comm_data_ptr)
{
	struct rmi_dev_comm_enter enter_args;
	struct rmi_dev_comm_exit exit_args;
	void *aux_mapped_addr;
	unsigned long comm_rc;
	int rc;

	assert(pd != NULL);

	if ((pd->dev_comm_state == DEV_COMM_IDLE) ||
	    (pd->dev_comm_state == DEV_COMM_ERROR)) {
		return RMI_ERROR_DEVICE;
	}

	/* Validate RmiDevCommEnter arguments in DevCommData */
	/* coverity[uninit_use_in_call:SUPPRESS] */
	comm_rc = copyin_and_validate_dev_comm_enter(dev_comm_data_ptr,
						     &enter_args,
						     pd->dev_comm_state);
	if (comm_rc != RMI_SUCCESS) {
		return comm_rc;
	}

	/* Map PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	rc = pdev_dispatch_cmd(pd, &enter_args, &exit_args);

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	comm_rc = copyout_dev_comm_exit(dev_comm_data_ptr, &exit_args);
	if (comm_rc != RMI_SUCCESS) {
		/* TODO: device state is updated but copying out data failed? */
		return RMI_ERROR_INPUT;
	}

	/*
	 * Based on the result of the device communication, update the device
	 * communication state.
	 */
	switch (rc) {
	case DEV_ASSIGN_STATUS_COMM_BLOCKED:
		pd->dev_comm_state = DEV_COMM_ACTIVE;
		break;
	case DEV_ASSIGN_STATUS_ERROR:
		pd->dev_comm_state = DEV_COMM_ERROR;
		break;
	case DEV_ASSIGN_STATUS_SUCCESS:
		pd->dev_comm_state = DEV_COMM_IDLE;
		break;
	default:
		assert(false);
	}

	return RMI_SUCCESS;
}
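
/*
 * Device communication states used above (informative, derived from the
 * handlers in this file):
 *
 *	DEV_COMM_PENDING: a request for the device is queued (set by PDEV
 *			  create/stop); a communicate call starts the exchange
 *	DEV_COMM_ACTIVE:  the dev_assign app is blocked waiting for a device
 *			  response (DEV_ASSIGN_STATUS_COMM_BLOCKED)
 *	DEV_COMM_IDLE:	  the exchange completed successfully
 *	DEV_COMM_ERROR:	  the exchange failed
 */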

/*
 * smc_pdev_communicate
 *
 * pdev_ptr - PA of the PDEV
 * dev_comm_data_ptr - PA of the communication data structure
 */
unsigned long smc_pdev_communicate(unsigned long pdev_ptr,
				   unsigned long dev_comm_data_ptr)
{
	struct granule *g_pdev;
	struct pdev *pd;
	unsigned long rmi_rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) || !GRANULE_ALIGNED(dev_comm_data_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	assert(pd->g_pdev == g_pdev);

	rmi_rc = dev_communicate(pd, dev_comm_data_ptr);

	/*
	 * Based on the resulting device communication state, update the PDEV
	 * state.
	 */
	switch (pd->dev_comm_state) {
	case DEV_COMM_ERROR:
		if (pd->rmi_state == RMI_PDEV_STATE_STOPPING) {
			pd->rmi_state = RMI_PDEV_STATE_STOPPED;
		} else {
			pd->rmi_state = RMI_PDEV_STATE_ERROR;
		}
		break;
	case DEV_COMM_IDLE:
		if (pd->rmi_state == RMI_PDEV_STATE_NEW) {
			pd->rmi_state = RMI_PDEV_STATE_NEEDS_KEY;
		} else if (pd->rmi_state == RMI_PDEV_STATE_STOPPING) {
			pd->rmi_state = RMI_PDEV_STATE_STOPPED;
		} else {
			pd->rmi_state = RMI_PDEV_STATE_ERROR;
		}
		break;
	case DEV_COMM_ACTIVE:
		/* No state change required */
		break;
	case DEV_COMM_PENDING:
	default:
		assert(false);
	}

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return rmi_rc;
}
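
/*
 * Illustrative host-side communicate loop (informal sketch; the rmi_*
 * wrappers and device_request_pending() are hypothetical host functions):
 * the Host keeps calling PDEV communicate, relaying each request in the
 * exit buffer to the physical device, until the exchange completes (for
 * example, the PDEV state reaches NEEDS_KEY or STOPPED):
 *
 *	do {
 *		ret = rmi_pdev_communicate(pdev_pa, comm_data_pa);
 *		// forward the request to the device, then place the
 *		// device response in the enter buffer
 *		rmi_pdev_get_state(pdev_pa, &state);
 *	} while ((ret == RMI_SUCCESS) && device_request_pending(state));
 */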

/*
 * smc_pdev_get_state
 *
 * Get the state of a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * res - SMC result
 */
void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res)
{
	struct granule *g_pdev;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		res->x[0] = SMC_NOT_SUPPORTED;
		return;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		goto out_err_input;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		goto out_err_input;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		goto out_err_input;
	}

	assert(pd->g_pdev == g_pdev);
	assert(pd->rmi_state <= RMI_PDEV_STATE_ERROR);
	res->x[0] = RMI_SUCCESS;
	res->x[1] = pd->rmi_state;

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return;

out_err_input:
	res->x[0] = RMI_ERROR_INPUT;
}

/*
 * Stop the PDEV. This sets the PDEV state to STOPPING, after which a PDEV
 * communicate call can do device-specific cleanup, such as terminating a
 * secure session.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_stop(unsigned long pdev_ptr)
{
	struct granule *g_pdev;
	unsigned long smc_rc;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if ((pd->rmi_state == RMI_PDEV_STATE_COMMUNICATING) ||
	    (pd->rmi_state == RMI_PDEV_STATE_STOPPING) ||
	    (pd->rmi_state == RMI_PDEV_STATE_STOPPED)) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	if (pd->num_vdevs != 0U) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	pd->rmi_state = RMI_PDEV_STATE_STOPPING;
	pd->dev_comm_state = DEV_COMM_PENDING;
	smc_rc = RMI_SUCCESS;

out_pdev_buf_unmap:
	buffer_unmap(pd);

	granule_unlock(g_pdev);

	return smc_rc;
}
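
/*
 * Illustrative host-side teardown sequence (informal sketch; the rmi_*
 * wrappers are hypothetical host functions). The PDEV must reach the
 * STOPPED state through communicate calls before it can be destroyed:
 *
 *	rmi_pdev_stop(pdev_pa);			// state -> STOPPING
 *	do {
 *		rmi_pdev_communicate(pdev_pa, comm_data_pa);
 *		rmi_pdev_get_state(pdev_pa, &state);
 *	} while (state != RMI_PDEV_STATE_STOPPED);
 *	rmi_pdev_destroy(pdev_pa);		// granules back to DELEGATED
 */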

/*
 * Abort device communication associated with a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_abort(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	unsigned long smc_rc;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if ((pd->rmi_state != RMI_PDEV_STATE_NEW) &&
	    (pd->rmi_state != RMI_PDEV_STATE_HAS_KEY) &&
	    (pd->rmi_state != RMI_PDEV_STATE_COMMUNICATING)) {
		smc_rc = RMI_ERROR_DEVICE;
		goto out_pdev_buf_unmap;
	}

	/*
	 * If there is no active device communication, then mapping aux
	 * granules and cancelling an existing communication is not required.
	 */
	if (pd->dev_comm_state != DEV_COMM_ACTIVE) {
		goto pdev_reset_state;
	}

	/* Map PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/*
	 * This will resume the blocked CMA SPDM command, and the recv callback
	 * handler will return an error and abort the command.
	 */
	rc = dev_assign_abort_app_operation(&pd->da_app_data);
	assert(rc == 0);

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

pdev_reset_state:
	/*
	 * As the device communication is aborted, if the PDEV is in the
	 * COMMUNICATING state then set its state to READY.
	 *
	 * For other PDEV states, transition the comm_state to PENDING,
	 * indicating that RMM has a device request which is ready to be
	 * sent to the device.
	 */
	if (pd->rmi_state == RMI_PDEV_STATE_COMMUNICATING) {
		pd->rmi_state = RMI_PDEV_STATE_READY;
		pd->dev_comm_state = DEV_COMM_IDLE;
	} else {
		pd->dev_comm_state = DEV_COMM_PENDING;
	}
	smc_rc = RMI_SUCCESS;

out_pdev_buf_unmap:
	buffer_unmap(pd);

	granule_unlock(g_pdev);

	return smc_rc;
}

/*
 * Destroy a PDEV. Using RMI_PDEV_DESTROY, the Host can reclaim the PDEV
 * resources once the PDEV state is STOPPED.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_destroy(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if (pd->rmi_state != RMI_PDEV_STATE_STOPPED) {
		buffer_unmap(pd);
		granule_unlock(g_pdev);
		return RMI_ERROR_DEVICE;
	}

	/* Map PDEV aux granules and map PDEV heap */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/* Deinit the DSM context state */
	rc = (int)app_run(&pd->da_app_data, DEVICE_ASSIGN_APP_FUNC_ID_DEINIT,
			  0, 0, 0, 0);
	assert(rc == DEV_ASSIGN_STATUS_SUCCESS);

	/* Unmap all PDEV aux granules and heap */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	/*
	 * Scrub the PDEV AUX granules and move their state from PDEV_AUX to
	 * DELEGATED.
	 */
	pdev_restore_aux_granules_state(pd->g_aux, pd->num_aux, true);

	/* Move the PDEV granule from PDEV to DELEGATED state */
	granule_memzero_mapped(pd);
	buffer_unmap(pd);

	granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);

	return RMI_SUCCESS;
}