/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <app.h>
#include <arch.h>
#include <arch_features.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <dev.h>
#include <dev_assign_app.h>
#include <feature.h>
#include <granule.h>
#include <sizes.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <string.h>
#include <utils_def.h>

/*
 * This function is only invoked when PDEV create fails or when the PDEV is
 * being destroyed. Hence the PDEV will not be in use when this function is
 * called, and therefore no lock is acquired before its invocation.
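 *
 * If 'scrub' is true, each granule is zeroed before it is transitioned back
 * to the DELEGATED state.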
 */
static void pdev_restore_aux_granules_state(struct granule *pdev_aux[],
					    unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_pdev_aux = pdev_aux[i];

		granule_lock(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		if (scrub) {
			buffer_granule_memzero(g_pdev_aux,
				(enum buffer_slot)((unsigned int)SLOT_PDEV_AUX0 + i));
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_DELEGATED);
	}
}

/*
 * TODO:
 * Validate device-specific PDEV parameters by traversing all previously
 * created PDEVs and checking them against the current PDEV parameters. This
 * implements RmiPdevParamsIsValid of the RMM specification.
 */
static int validate_rmi_pdev_params(struct rmi_pdev_params *pd_params)
{
	(void)pd_params;

	/*
	 * Check that the device identifier, Root Port identifier, IDE stream
	 * identifier and RID range are valid.
	 */

	/*
	 * Check that the device identifier is not equal to the device
	 * identifier of another PDEV.
	 */

	/* Check that the RID range does not overlap the RID range of another PDEV */

	/*
	 * Check that every address range falls within an MMIO range permitted
	 * by the system.
	 */

	/*
	 * Check that none of the address ranges overlaps another address range
	 * for this PDEV.
	 */

	return 0;
}

static unsigned long pdev_get_aux_count_from_flags(unsigned long pdev_flags)
{
	unsigned long aux_count;

	(void)pdev_flags;

	/*
	 * The current implementation requires that RMI_PDEV_SPDM_TRUE
	 * is set in the flags.
	 */
	assert(EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_flags) == RMI_PDEV_SPDM_TRUE);

	/*
	 * Currently, the number of pages required to instantiate an app is
	 * hardcoded in the app header. In this implementation, aux_count
	 * does not depend on the flags set in pdev_flags. The worst case
	 * (i.e., the most granules) is assumed.
	 */
	aux_count = app_get_required_granule_count(RMM_DEV_ASSIGN_APP_ID);
	assert(aux_count <= PDEV_PARAM_AUX_GRANULES_MAX);

	return aux_count;
}

/*
 * smc_pdev_aux_count
 *
 * Get the number of auxiliary granules required for a PDEV.
 *
 * flags - PDEV flags
 * res - SMC result
 */
void smc_pdev_aux_count(unsigned long flags, struct smc_result *res)
{
	if (is_rmi_feat_da_enabled()) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = pdev_get_aux_count_from_flags(flags);
	} else {
		res->x[0] = SMC_NOT_SUPPORTED;
	}
}

/*
 * smc_pdev_create
 *
 * pdev_ptr - PA of the PDEV
 * pdev_params_ptr - PA of PDEV parameters
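 *
 * Returns RMI_SUCCESS on success, an RMI error code on failure, or
 * SMC_NOT_SUPPORTED if the RMI DA feature is not enabled.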
 */
unsigned long smc_pdev_create(unsigned long pdev_ptr,
			      unsigned long pdev_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pdev_params;
	struct pdev *pd;
	struct rmi_pdev_params pdev_params; /* this consumes 4KB of stack */
	struct granule *pdev_aux_granules[PDEV_PARAM_AUX_GRANULES_MAX];
	unsigned long num_aux_req;
	bool ns_access_ok;
	void *aux_mapped_addr;
	struct dev_assign_params dparams;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) ||
	    !GRANULE_ALIGNED(pdev_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy PDEV parameters */
	g_pdev_params = find_granule(pdev_params_ptr);
	if ((g_pdev_params == NULL) ||
	    (granule_unlocked_state(g_pdev_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pdev_params, 0U,
				      sizeof(struct rmi_pdev_params),
				      &pdev_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Validate RmiPdevFlags. RMM supports a PCIe off-chip device,
	 * represented by the flags SPDM=true, IDE=true, COHERENT=false,
	 * P2P=false.
	 */
	/* coverity[uninit_use:SUPPRESS] */
	if ((EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_params.flags) !=
	     RMI_PDEV_SPDM_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) !=
	     RMI_PDEV_IDE_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_COHERENT, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_P2P, pdev_params.flags) !=
	     RMI_PDEV_P2P_FALSE)) {
		return RMI_ERROR_NOT_SUPPORTED;
	}

	/* Validate PDEV parameter num_aux */
	num_aux_req = pdev_get_aux_count_from_flags(pdev_params.flags);
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.num_aux == 0U) ||
	    (pdev_params.num_aux != num_aux_req)) {
		ERROR("ERROR: PDEV needs %lu aux granules, host allocated %lu.\n",
		      num_aux_req, pdev_params.num_aux);
		return RMI_ERROR_INPUT;
	}

	/* Validate PDEV parameter ncoh_num_addr_range */
	/* coverity[uninit_use:SUPPRESS] */
	if (pdev_params.ncoh_num_addr_range > PDEV_PARAM_NCOH_ADDR_RANGE_MAX) {
		return RMI_ERROR_INPUT;
	}

	/* Validate hash algorithm */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.hash_algo != RMI_HASH_SHA_256) &&
	    (pdev_params.hash_algo != RMI_HASH_SHA_512)) {
		return RMI_ERROR_INPUT;
	}

	/* cppcheck-suppress knownConditionTrueFalse */
	if (validate_rmi_pdev_params(&pdev_params) != 0) {
		return RMI_ERROR_INPUT;
	}

	/* Loop through pdev_aux_granules and transit them */
	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		struct granule *g_pdev_aux;

		/* coverity[uninit_use_in_call:SUPPRESS] */
		g_pdev_aux = find_lock_granule(pdev_params.aux[i],
					       GRANULE_STATE_DELEGATED);
		if (g_pdev_aux == NULL) {
			pdev_restore_aux_granules_state(pdev_aux_granules, i,
							false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		pdev_aux_granules[i] = g_pdev_aux;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_DELEGATED);
	if (g_pdev == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_restore_pdev_aux_granule_state;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
		goto out_restore_pdev_aux_granule_state;
	}

	/* Map all PDEV aux granules to slots starting at SLOT_PDEV_AUX0 */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pdev_aux_granules,
					(unsigned int)pdev_params.num_aux);
	if (aux_mapped_addr == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_unmap_pdev_slot_buffer;
	}

	/* Call init routine to initialize device class specific state */
	dparams.dev_handle = (void *)pd;
	dparams.rmi_hash_algo = pdev_params.hash_algo;
	dparams.cert_slot_id = (uint8_t)pdev_params.cert_id;
	if (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) ==
	    RMI_PDEV_IDE_TRUE) {
		dparams.has_ide = true;
		dparams.ecam_addr = pdev_params.ecam_addr;
		dparams.rp_id = pdev_params.root_id;
		dparams.ide_sid = pdev_params.ide_sid;
	} else {
		dparams.has_ide = false;
	}

	/* Use the PDEV aux pages for the DA app */
	uintptr_t granule_pas[PDEV_PARAM_AUX_GRANULES_MAX];

	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		granule_pas[i] = granule_addr(pdev_aux_granules[i]);
	}

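	/*
	 * Initialize the device assignment app instance for this PDEV. The
	 * PDEV aux granules provide the memory backing the app.
	 */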
	rc = dev_assign_app_init(&pd->da_app_data,
				 granule_pas,
				 pdev_params.num_aux,
				 aux_mapped_addr, &dparams);

	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		/* Initialize PDEV */
		pd->g_pdev = g_pdev;
		pd->rmi_state = RMI_PDEV_STATE_NEW;
		pd->rmi_flags = pdev_params.flags;
		pd->num_vdevs = 0;
		pd->rmi_hash_algo = pdev_params.hash_algo;
		pd->num_aux = (unsigned int)pdev_params.num_aux;
		(void)memcpy((void *)pd->g_aux, (void *)pdev_aux_granules,
			     pdev_params.num_aux * sizeof(struct granule *));

		/* Initialize PDEV communication state */
		pd->dev_comm_state = DEV_COMM_PENDING;

		/* Initialize PDEV PCIe device */
		pd->dev.bdf = pdev_params.pdev_id;
		pd->dev.segment_id = pdev_params.segment_id;
		pd->dev.ecam_addr = pdev_params.ecam_addr;
		pd->dev.root_id = pdev_params.root_id;
		pd->dev.cert_slot_id = pdev_params.cert_id;
		pd->dev.ide_sid = pdev_params.ide_sid;
		pd->dev.rid_base = pdev_params.rid_base;
		pd->dev.rid_top = pdev_params.rid_top;
		pd->dev.ncoh_num_addr_range = pdev_params.ncoh_num_addr_range;
		(void)memcpy(&pd->dev.ncoh_addr_range,
			     &pdev_params.ncoh_addr_range,
			     (sizeof(struct rmi_address_range) *
			      pdev_params.ncoh_num_addr_range));

		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_INPUT;
	}

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, (unsigned int)pdev_params.num_aux);

out_unmap_pdev_slot_buffer:
	/* Unmap PDEV buffer from slot PDEV */
	buffer_unmap(pd);

	/*
	 * On success, unlock the PDEV granule and transition its state to
	 * GRANULE_STATE_PDEV; otherwise, unlock it and retain the
	 * GRANULE_STATE_DELEGATED state.
	 */
	if (smc_rc == RMI_SUCCESS) {
		granule_unlock_transition(g_pdev, GRANULE_STATE_PDEV);
	} else {
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
	}

out_restore_pdev_aux_granule_state:
	if (smc_rc != RMI_SUCCESS) {
		/*
		 * Transition all PDEV aux granule states back to
		 * GRANULE_STATE_DELEGATED
		 */
		pdev_restore_aux_granules_state(pdev_aux_granules,
				(unsigned int)pdev_params.num_aux, false);
	}

	return smc_rc;
}

/* Validate the RmiDevCommData.RmiDevCommEnter argument passed by the Host */
static unsigned long copyin_and_validate_dev_comm_enter(
				unsigned long dev_comm_data_ptr,
				struct rmi_dev_comm_enter *enter_args,
				unsigned int dev_comm_state)
{
	struct granule *g_dev_comm_data;
	struct granule *g_buf;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_dev_comm_data,
				      RMI_DEV_COMM_ENTER_OFFSET,
				      sizeof(struct rmi_dev_comm_enter),
				      enter_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	if (!GRANULE_ALIGNED(enter_args->req_addr) ||
	    !GRANULE_ALIGNED(enter_args->resp_addr) ||
	    (enter_args->resp_len > GRANULE_SIZE)) {
		return RMI_ERROR_INPUT;
	}

	if ((enter_args->status == RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	    (enter_args->resp_len == 0U)) {
		return RMI_ERROR_INPUT;
	}

	/* Check if request and response buffers are in NS PAS */
	g_buf = find_granule(enter_args->req_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	g_buf = find_granule(enter_args->resp_addr);
	if ((g_buf == NULL) ||
	    (granule_unlocked_state(g_buf) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

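	/*
	 * The Host-provided status must be consistent with the current device
	 * communication state: an active session can only be entered with a
	 * response or an error, and a pending session must be entered with no
	 * prior status.
	 */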
	if ((dev_comm_state == DEV_COMM_ACTIVE) &&
	    ((enter_args->status != RMI_DEV_COMM_ENTER_STATUS_RESPONSE) &&
	     (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_ERROR))) {
		return RMI_ERROR_DEVICE;
	}

	if ((dev_comm_state == DEV_COMM_PENDING) &&
	    (enter_args->status != RMI_DEV_COMM_ENTER_STATUS_NONE)) {
		return RMI_ERROR_DEVICE;
	}

	return RMI_SUCCESS;
}

/*
 * Copy out DevCommExitArgs to the Host
 */
static unsigned long copyout_dev_comm_exit(unsigned long dev_comm_data_ptr,
					   struct rmi_dev_comm_exit *exit_args)
{
	struct granule *g_dev_comm_data;
	bool ns_access_ok;

	g_dev_comm_data = find_granule(dev_comm_data_ptr);
	if ((g_dev_comm_data == NULL) ||
	    (granule_unlocked_state(g_dev_comm_data) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_write(SLOT_NS, g_dev_comm_data,
				       RMI_DEV_COMM_EXIT_OFFSET,
				       sizeof(struct rmi_dev_comm_exit),
				       exit_args);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	return RMI_SUCCESS;
}

static int pdev_dispatch_cmd(struct pdev *pd, struct rmi_dev_comm_enter *enter_args,
			     struct rmi_dev_comm_exit *exit_args)
{
	int rc;

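	/*
	 * Resume the device assignment app if a communication session is
	 * already active.
	 */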
	if (pd->dev_comm_state == DEV_COMM_ACTIVE) {
		return dev_assign_dev_communicate(&pd->da_app_data, enter_args,
				exit_args, DEVICE_ASSIGN_APP_FUNC_ID_RESUME);
	}

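	/* Otherwise, start a new session based on the current PDEV state */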
	switch (pd->rmi_state) {
	case RMI_PDEV_STATE_NEW:
		rc = dev_assign_dev_communicate(&pd->da_app_data, enter_args,
				exit_args, DEVICE_ASSIGN_APP_FUNC_ID_CONNECT_INIT);
		break;
	default:
		assert(false);
		rc = -1;
	}

	return rc;
}

static unsigned long dev_communicate(struct pdev *pd,
				     unsigned long dev_comm_data_ptr)
{
	struct rmi_dev_comm_enter enter_args;
	struct rmi_dev_comm_exit exit_args;
	void *aux_mapped_addr;
	unsigned long comm_rc;
	int rc;

	assert(pd != NULL);

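	/* Device communication can only proceed when pending or active */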
	if ((pd->dev_comm_state == DEV_COMM_IDLE) ||
	    (pd->dev_comm_state == DEV_COMM_ERROR)) {
		return RMI_ERROR_DEVICE;
	}

	/* Validate RmiDevCommEnter arguments in DevCommData */
	/* coverity[uninit_use_in_call:SUPPRESS] */
	comm_rc = copyin_and_validate_dev_comm_enter(dev_comm_data_ptr,
						     &enter_args,
						     pd->dev_comm_state);
	if (comm_rc != RMI_SUCCESS) {
		return comm_rc;
	}

	/* Map PDEV aux granules */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	rc = pdev_dispatch_cmd(pd, &enter_args, &exit_args);

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	comm_rc = copyout_dev_comm_exit(dev_comm_data_ptr, &exit_args);
	if (comm_rc != RMI_SUCCESS) {
		/* TODO: device status is updated but copyout data failed? */
		return RMI_ERROR_INPUT;
	}

	/*
	 * Based on the device communication result, update the device
	 * communication state.
	 */
	switch (rc) {
	case DEV_ASSIGN_STATUS_COMM_BLOCKED:
		pd->dev_comm_state = DEV_COMM_ACTIVE;
		break;
	case DEV_ASSIGN_STATUS_ERROR:
		pd->dev_comm_state = DEV_COMM_ERROR;
		break;
	case DEV_ASSIGN_STATUS_SUCCESS:
		pd->dev_comm_state = DEV_COMM_IDLE;
		break;
	default:
		assert(false);
	}

	return RMI_SUCCESS;
}

/*
 * smc_pdev_communicate
 *
 * pdev_ptr - PA of the PDEV
 * dev_comm_data_ptr - PA of the communication data structure
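 *
 * The Host is expected to call this ABI repeatedly, while the PDEV remains
 * in the ACTIVE communication state, to progress the device communication.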
 */
unsigned long smc_pdev_communicate(unsigned long pdev_ptr,
				   unsigned long dev_comm_data_ptr)
{
	struct granule *g_pdev;
	struct pdev *pd;
	unsigned long rmi_rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) || !GRANULE_ALIGNED(dev_comm_data_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	assert(pd->g_pdev == g_pdev);

	rmi_rc = dev_communicate(pd, dev_comm_data_ptr);

	/*
	 * Based on the resulting device communication state, update the
	 * PDEV state.
	 */
	switch (pd->dev_comm_state) {
	case DEV_COMM_ERROR:
		pd->rmi_state = RMI_PDEV_STATE_ERROR;
		break;
	case DEV_COMM_IDLE:
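		/*
		 * Device communication has completed. A PDEV in NEW state
		 * has finished the connect flow and is now expected to be
		 * provided with the device public key by the Host; reaching
		 * this point in any other state is an error.
		 */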
		if (pd->rmi_state == RMI_PDEV_STATE_NEW) {
			pd->rmi_state = RMI_PDEV_STATE_NEEDS_KEY;
		} else {
			pd->rmi_state = RMI_PDEV_STATE_ERROR;
		}
		break;
	case DEV_COMM_ACTIVE:
		/* No state change required */
		break;
	case DEV_COMM_PENDING:
	default:
		assert(false);
	}

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return rmi_rc;
}

/*
 * smc_pdev_get_state
 *
 * Get state of a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * res - SMC result
 */
void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res)
{
	struct granule *g_pdev;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		res->x[0] = SMC_NOT_SUPPORTED;
		return;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		goto out_err_input;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		goto out_err_input;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		goto out_err_input;
	}

	assert(pd->g_pdev == g_pdev);
	assert(pd->rmi_state <= RMI_PDEV_STATE_ERROR);
	res->x[0] = RMI_SUCCESS;
	res->x[1] = pd->rmi_state;

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return;

out_err_input:
	res->x[0] = RMI_ERROR_INPUT;
}

/*
 * smc_pdev_destroy
 *
 * Destroy a PDEV. The Host can reclaim PDEV resources using RMI PDEV_DESTROY
 * once the PDEV is in STOPPED state.
 *
 * pdev_ptr - PA of the PDEV
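 *
 * Returns RMI_SUCCESS on success, an RMI error code on failure, or
 * SMC_NOT_SUPPORTED if the RMI DA feature is not enabled.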
 */
unsigned long smc_pdev_destroy(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if (pd->rmi_state != RMI_PDEV_STATE_STOPPED) {
		buffer_unmap(pd);
		granule_unlock(g_pdev);
		return RMI_ERROR_DEVICE;
	}

	/* Map PDEV aux granules and the PDEV heap */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/* Deinit the DSM context state */
	rc = (int)app_run(&pd->da_app_data, DEVICE_ASSIGN_APP_FUNC_ID_DEINIT,
			  0, 0, 0, 0);
	assert(rc == DEV_ASSIGN_STATUS_SUCCESS);

	/* Unmap all PDEV aux granules and the heap */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	/*
	 * Scrub the PDEV aux granules and transition their state from
	 * PDEV_AUX back to DELEGATED.
	 */
	pdev_restore_aux_granules_state(pd->g_aux, pd->num_aux, true);

	/* Move the PDEV granule from PDEV to DELEGATED state */
	granule_memzero_mapped(pd);
	buffer_unmap(pd);

	granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);

	return RMI_SUCCESS;
}