/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <app.h>
#include <arch.h>
#include <arch_features.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <dev.h>
#include <dev_assign_app.h>
#include <feature.h>
#include <granule.h>
#include <sizes.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <string.h>
#include <utils_def.h>

/*
 * This function is only invoked when PDEV creation fails or when the PDEV is
 * being destroyed. Hence the PDEV is not in use when this function is called,
 * and therefore no lock is acquired before its invocation.
 */
static void pdev_restore_aux_granules_state(struct granule *pdev_aux[],
					    unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_pdev_aux = pdev_aux[i];

		granule_lock(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		if (scrub) {
			buffer_granule_memzero(g_pdev_aux,
				(enum buffer_slot)((unsigned int)SLOT_PDEV_AUX0 + i));
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_DELEGATED);
	}
}

/*
 * TODO:
 * Validate device-specific PDEV parameters by traversing all previously
 * created PDEVs and checking them against the current PDEV parameters. This
 * implements RmiPdevParamsIsValid of the RMM specification.
 */
static int validate_rmi_pdev_params(struct rmi_pdev_params *pd_params)
{
	(void)pd_params;

	/*
	 * Check that the device identifier, Root Port identifier, IDE stream
	 * identifier and RID range are valid.
	 */

	/*
	 * Check that the device identifier is not equal to the device
	 * identifier of another PDEV.
	 */

	/* Check that the RID range does not overlap the RID range of another PDEV. */

	/*
	 * Check that every address range falls within an MMIO range permitted
	 * by the system.
	 */

	/*
	 * Check that none of the address ranges overlaps another address
	 * range for this PDEV.
	 */

	return 0;
}

static unsigned long pdev_get_aux_count_from_flags(unsigned long pdev_flags)
{
	unsigned long aux_count;

	(void)pdev_flags;

	/*
	 * The current implementation requires that RMI_PDEV_SPDM_TRUE
	 * is set in the flags.
	 */
	assert(EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_flags) == RMI_PDEV_SPDM_TRUE);

	/*
	 * Currently, the number of pages required to instantiate an app is
	 * hardcoded in the app header. In this implementation, aux_count
	 * does not depend on the flags set in pdev_flags. The worst case
	 * (i.e., the most granules) is assumed.
	 */
	aux_count = app_get_required_granule_count(RMM_DEV_ASSIGN_APP_ID);
	assert(aux_count <= PDEV_PARAM_AUX_GRANULES_MAX);

	return aux_count;
}

/*
 * smc_pdev_aux_count
 *
 * Get number of auxiliary Granules required for a PDEV.
 *
 * flags - PDEV flags
 * res - SMC result
 */
void smc_pdev_aux_count(unsigned long flags, struct smc_result *res)
{
	if (is_rmi_feat_da_enabled()) {
		res->x[0] = RMI_SUCCESS;
		res->x[1] = pdev_get_aux_count_from_flags(flags);
	} else {
		res->x[0] = SMC_NOT_SUPPORTED;
	}
}

/*
 * smc_pdev_create
 *
 * Create a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * pdev_params_ptr - PA of PDEV parameters
 */
unsigned long smc_pdev_create(unsigned long pdev_ptr,
			      unsigned long pdev_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pdev_params;
	struct pdev *pd;
	struct rmi_pdev_params pdev_params; /* this consumes 4k of stack */
	struct granule *pdev_aux_granules[PDEV_PARAM_AUX_GRANULES_MAX];
	unsigned long num_aux_req;
	bool ns_access_ok;
	void *aux_mapped_addr;
	struct dev_assign_params dparams;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) ||
	    !GRANULE_ALIGNED(pdev_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy PDEV parameters */
	g_pdev_params = find_granule(pdev_params_ptr);
	if ((g_pdev_params == NULL) ||
	    (granule_unlocked_state(g_pdev_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pdev_params, 0U,
				      sizeof(struct rmi_pdev_params),
				      &pdev_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Validate RmiPdevFlags. RMM supports PCIe off-chip devices
	 * represented by the flags SPDM=true, IDE=true, COHERENT=false,
	 * P2P=false.
	 */
	/* coverity[uninit_use:SUPPRESS] */
	if ((EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_params.flags) !=
	     RMI_PDEV_SPDM_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) !=
	     RMI_PDEV_IDE_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_COHERENT, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_P2P, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE)) {
		return RMI_ERROR_NOT_SUPPORTED;
	}

	/* Validate PDEV parameter num_aux */
	num_aux_req = pdev_get_aux_count_from_flags(pdev_params.flags);
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.num_aux == 0U) ||
	    (pdev_params.num_aux != num_aux_req)) {
		ERROR("ERROR: PDEV needs %lu aux granules, host allocated %lu.\n",
		      num_aux_req, pdev_params.num_aux);
		return RMI_ERROR_INPUT;
	}

	/* Validate PDEV parameter ncoh_num_addr_range */
	/* coverity[uninit_use:SUPPRESS] */
	if (pdev_params.ncoh_num_addr_range > PDEV_PARAM_NCOH_ADDR_RANGE_MAX) {
		return RMI_ERROR_INPUT;
	}

	/* Validate hash algorithm */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.hash_algo != RMI_HASH_SHA_256) &&
	    (pdev_params.hash_algo != RMI_HASH_SHA_512)) {
		return RMI_ERROR_INPUT;
	}

	/* cppcheck-suppress knownConditionTrueFalse */
	if (validate_rmi_pdev_params(&pdev_params) != 0) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Lock each aux granule and transition it from DELEGATED to PDEV_AUX
	 * state. On failure, roll back the granules already transitioned.
	 */
	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		struct granule *g_pdev_aux;

		/* coverity[uninit_use_in_call:SUPPRESS] */
		g_pdev_aux = find_lock_granule(pdev_params.aux[i],
					       GRANULE_STATE_DELEGATED);
		if (g_pdev_aux == NULL) {
			pdev_restore_aux_granules_state(pdev_aux_granules, i,
							false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		pdev_aux_granules[i] = g_pdev_aux;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_DELEGATED);
	if (g_pdev == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_restore_pdev_aux_granule_state;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
		goto out_restore_pdev_aux_granule_state;
	}

	/* Map all PDEV aux granules to slots starting at SLOT_PDEV_AUX0 */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pdev_aux_granules,
					(unsigned int)pdev_params.num_aux);
	if (aux_mapped_addr == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_unmap_pdev_slot_buffer;
	}

	/* Call init routine to initialize device class specific state */
	dparams.dev_handle = (void *)pd;
	dparams.rmi_hash_algo = pdev_params.hash_algo;
	dparams.cert_slot_id = (uint8_t)pdev_params.cert_id;

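	/*
	 * IDE stream information is passed to the device assignment app only
	 * when the PDEV flags indicate an IDE protected link.
	 */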
	if (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) ==
	    RMI_PDEV_IDE_TRUE) {
		dparams.has_ide = true;
		dparams.ecam_addr = pdev_params.ecam_addr;
		dparams.rp_id = pdev_params.root_id;
		dparams.ide_sid = pdev_params.ide_sid;
	} else {
		dparams.has_ide = false;
	}

	/* Use the PDEV aux pages for the DA app */
	uintptr_t granule_pas[PDEV_PARAM_AUX_GRANULES_MAX];

	for (unsigned int i = 0U; i < pdev_params.num_aux; ++i) {
		granule_pas[i] = granule_addr(pdev_aux_granules[i]);
	}

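	/*
	 * Instantiate the device assignment app for this PDEV, using the
	 * PDEV aux granules as the app's backing memory.
	 */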
	rc = dev_assign_app_init(&pd->da_app_data,
				 granule_pas,
				 pdev_params.num_aux,
				 aux_mapped_addr, &dparams);

	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		/* Initialize PDEV */
		pd->g_pdev = g_pdev;
		pd->rmi_state = RMI_PDEV_STATE_NEW;
		pd->rmi_flags = pdev_params.flags;
		pd->num_vdevs = 0;
		pd->rmi_hash_algo = pdev_params.hash_algo;
		pd->num_aux = (unsigned int)pdev_params.num_aux;
		(void)memcpy((void *)pd->g_aux, (void *)pdev_aux_granules,
			     pdev_params.num_aux * sizeof(struct granule *));

		/* Initialize PDEV communication state */
		pd->dev_comm_state = DEV_COMM_PENDING;

		/* Initialize PDEV PCIe device */
		pd->dev.bdf = pdev_params.pdev_id;
		pd->dev.segment_id = pdev_params.segment_id;
		pd->dev.ecam_addr = pdev_params.ecam_addr;
		pd->dev.root_id = pdev_params.root_id;
		pd->dev.cert_slot_id = pdev_params.cert_id;
		pd->dev.ide_sid = pdev_params.ide_sid;
		pd->dev.rid_base = pdev_params.rid_base;
		pd->dev.rid_top = pdev_params.rid_top;
		pd->dev.ncoh_num_addr_range = pdev_params.ncoh_num_addr_range;
		(void)memcpy(&pd->dev.ncoh_addr_range,
			     &pdev_params.ncoh_addr_range,
			     (sizeof(struct rmi_address_range) *
			      pdev_params.ncoh_num_addr_range));

		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_INPUT;
	}

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, (unsigned int)pdev_params.num_aux);

out_unmap_pdev_slot_buffer:
	/* Unmap PDEV buffer from slot PDEV */
	buffer_unmap(pd);

	/*
	 * On success, unlock the PDEV granule and transition it to
	 * GRANULE_STATE_PDEV; otherwise unlock it and leave it in
	 * GRANULE_STATE_DELEGATED.
	 */
	if (smc_rc == RMI_SUCCESS) {
		granule_unlock_transition(g_pdev, GRANULE_STATE_PDEV);
	} else {
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
	}

out_restore_pdev_aux_granule_state:
	if (smc_rc != RMI_SUCCESS) {
		/*
		 * Transition all PDEV aux granules back to
		 * GRANULE_STATE_DELEGATED.
		 */
		pdev_restore_aux_granules_state(pdev_aux_granules,
				(unsigned int)pdev_params.num_aux, false);
	}

	return smc_rc;
}

/*
 * smc_pdev_get_state
 *
 * Get state of a PDEV.
 *
 * pdev_ptr - PA of the PDEV
 * res - SMC result
 */
void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res)
{
	struct granule *g_pdev;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		res->x[0] = SMC_NOT_SUPPORTED;
		return;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		goto out_err_input;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		goto out_err_input;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		goto out_err_input;
	}

	assert(pd->g_pdev == g_pdev);
	assert(pd->rmi_state <= RMI_PDEV_STATE_ERROR);
	res->x[0] = RMI_SUCCESS;
	res->x[1] = pd->rmi_state;

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return;

out_err_input:
	res->x[0] = RMI_ERROR_INPUT;
}

/*
 * smc_pdev_destroy
 *
 * Destroy a PDEV. The Host can reclaim PDEV resources using RMI PDEV_DESTROY
 * once the PDEV is in STOPPED state.
 *
 * pdev_ptr - PA of the PDEV
 */
unsigned long smc_pdev_destroy(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

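	/* The PDEV can be destroyed only when it is in STOPPED state */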
	if (pd->rmi_state != RMI_PDEV_STATE_STOPPED) {
		buffer_unmap(pd);
		granule_unlock(g_pdev);
		return RMI_ERROR_DEVICE;
	}

	/* Map PDEV aux granules and the PDEV heap */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/* Deinitialize the DSM (Device Security Manager) context state */
	rc = (int)app_run(&pd->da_app_data, DEVICE_ASSIGN_APP_FUNC_ID_DEINIT,
			  0, 0, 0, 0);
	assert(rc == DEV_ASSIGN_STATUS_SUCCESS);

	/* Unmap all PDEV aux granules and heap */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	/*
	 * Scrub the PDEV aux granules and transition their state from
	 * PDEV_AUX to DELEGATED.
	 */
	pdev_restore_aux_granules_state(pd->g_aux, pd->num_aux, true);

	/* Move the PDEV granule from PDEV to DELEGATED state */
	granule_memzero_mapped(pd);
	buffer_unmap(pd);

	granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);

	return RMI_SUCCESS;
}