/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <app.h>
#include <arch.h>
#include <arch_features.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <dev.h>
#include <dev_assign_app.h>
#include <feature.h>
#include <granule.h>
#include <sizes.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <string.h>
#include <utils_def.h>

/*
 * This function is only invoked when PDEV creation fails or when a PDEV is
 * being destroyed. The PDEV is therefore not in use when this function is
 * called, so no lock is acquired before its invocation.
 */
static void pdev_restore_aux_granules_state(struct granule *pdev_aux[],
					    unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_pdev_aux = pdev_aux[i];

		granule_lock(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		if (scrub) {
			buffer_granule_memzero(g_pdev_aux,
				(enum buffer_slot)((unsigned int)SLOT_PDEV_AUX0 + i));
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_DELEGATED);
	}
}

/*
 * TODO:
 * Validate device-specific PDEV parameters by traversing all previously
 * created PDEVs and checking them against the current PDEV parameters. This
 * implements RmiPdevParamsIsValid of the RMM specification.
 */
static int validate_rmi_pdev_params(struct rmi_pdev_params *pd_params)
{
	(void)pd_params;

	/*
	 * Check that the device identifier, Root Port identifier, IDE stream
	 * identifier and RID range are valid.
	 */

	/*
	 * Check that the device identifier is not equal to the device
	 * identifier of another PDEV.
	 */

	/* Check that the RID range does not overlap the RID range of another PDEV */

	/*
	 * Check that every address range falls within an MMIO range permitted
	 * by the system.
	 */

	/*
	 * Check that none of the address ranges overlaps another address
	 * range for this PDEV.
	 */

	return 0;
}
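
/*
 * Illustrative sketch of the RID range overlap check described above. This is
 * an assumption, not current RMM code: pdev_first()/pdev_next() are
 * hypothetical iterators over previously created PDEVs. Two RID ranges
 * [a_base, a_top] and [b_base, b_top] overlap iff
 * (a_base <= b_top) && (b_base <= a_top).
 */
#if 0 /* sketch only, not built */
static bool pdev_rid_range_overlaps(const struct rmi_pdev_params *pd_params)
{
	for (struct pdev *other = pdev_first(); other != NULL;
	     other = pdev_next(other)) {
		if ((pd_params->rid_base <= other->dev.rid_top) &&
		    (other->dev.rid_base <= pd_params->rid_top)) {
			return true;
		}
	}
	return false;
}
#endif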

/*
 * smc_pdev_create
 *
 * pdev_ptr		- PA of the PDEV
 * pdev_params_ptr	- PA of PDEV parameters
 */
unsigned long smc_pdev_create(unsigned long pdev_ptr,
			      unsigned long pdev_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pdev_params;
	struct pdev *pd;
	struct rmi_pdev_params pdev_params; /* this consumes 4KB of stack */
	struct granule *pdev_aux_granules[PDEV_PARAM_AUX_GRANULES_MAX];
	bool ns_access_ok;
	void *aux_mapped_addr;
	struct dev_assign_params dparams;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) ||
	    !GRANULE_ALIGNED(pdev_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy PDEV parameters */
	g_pdev_params = find_granule(pdev_params_ptr);
	if ((g_pdev_params == NULL) ||
	    (granule_unlocked_state(g_pdev_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pdev_params, 0U,
				      sizeof(struct rmi_pdev_params),
				      &pdev_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Validate RmiPdevFlags. RMM supports PCIe off-chip devices
	 * represented by flags: SPDM = true, IDE = true, COHERENT = false,
	 * P2P = false. Note: the P2P check below compares against the
	 * zero-valued RMI_PDEV_COHERENT_FALSE constant, i.e. P2P must be
	 * false.
	 */
	/* coverity[uninit_use:SUPPRESS] */
	if ((EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_params.flags) !=
	     RMI_PDEV_SPDM_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) !=
	     RMI_PDEV_IDE_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_COHERENT, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_P2P, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE)) {
		return RMI_ERROR_NOT_SUPPORTED;
	}

	/* Validate PDEV parameters that are not specific to a device class */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.num_aux > PDEV_PARAM_AUX_GRANULES_MAX) ||
	    (pdev_params.ncoh_num_addr_range > PDEV_PARAM_NCOH_ADDR_RANGE_MAX)) {
		return RMI_ERROR_INPUT;
	}

	/* Validate the hash algorithm */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.hash_algo != RMI_HASH_SHA_256) &&
	    (pdev_params.hash_algo != RMI_HASH_SHA_512)) {
		return RMI_ERROR_INPUT;
	}

	/* cppcheck-suppress knownConditionTrueFalse */
	if (validate_rmi_pdev_params(&pdev_params) != 0) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Lock each PDEV auxiliary granule, transition it to PDEV_AUX state
	 * and record it in pdev_aux_granules. On failure, roll back the
	 * granules that were already transitioned.
	 */
	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		struct granule *g_pdev_aux;

		/* coverity[uninit_use_in_call:SUPPRESS] */
		g_pdev_aux = find_lock_granule(pdev_params.aux[i],
					       GRANULE_STATE_DELEGATED);
		if (g_pdev_aux == NULL) {
			pdev_restore_aux_granules_state(pdev_aux_granules, i,
							false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		pdev_aux_granules[i] = g_pdev_aux;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_DELEGATED);
	if (g_pdev == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_restore_pdev_aux_granule_state;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
		goto out_restore_pdev_aux_granule_state;
	}

	/* Map all PDEV aux granules to slots starting at SLOT_PDEV_AUX0 */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pdev_aux_granules,
					(unsigned int)pdev_params.num_aux);
	if (aux_mapped_addr == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_unmap_pdev_slot_buffer;
	}

	/* Call the init routine to initialize device-class-specific state */
	dparams.dev_handle = (void *)pd;
	dparams.rmi_hash_algo = pdev_params.hash_algo;
	dparams.cert_slot_id = (uint8_t)pdev_params.cert_id;

	if (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) ==
	    RMI_PDEV_IDE_TRUE) {
		dparams.has_ide = true;
		dparams.ecam_addr = pdev_params.ecam_addr;
		dparams.rp_id = pdev_params.root_id;
		dparams.ide_sid = pdev_params.ide_sid;
	} else {
		dparams.has_ide = false;
	}

	/* Use the PDEV aux pages for the DA app */
	uintptr_t granule_pas[PDEV_PARAM_AUX_GRANULES_MAX];

	for (unsigned int i = 0U; i < pdev_params.num_aux; ++i) {
		granule_pas[i] = granule_addr(pdev_aux_granules[i]);
	}

	rc = dev_assign_app_init(&pd->da_app_data, granule_pas,
				 pdev_params.num_aux, aux_mapped_addr,
				 &dparams);

	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		/* Initialize the PDEV */
		pd->g_pdev = g_pdev;
		pd->rmi_state = RMI_PDEV_STATE_NEW;
		pd->rmi_flags = pdev_params.flags;
		pd->num_vdevs = 0;
		pd->rmi_hash_algo = pdev_params.hash_algo;
		pd->num_aux = (unsigned int)pdev_params.num_aux;
		(void)memcpy((void *)pd->g_aux, (void *)pdev_aux_granules,
			     pdev_params.num_aux * sizeof(struct granule *));

		/* Initialize PDEV communication state */
		pd->dev_comm_state = DEV_COMM_PENDING;

		/* Initialize the PDEV PCIe device */
		pd->dev.bdf = pdev_params.pdev_id;
		pd->dev.segment_id = pdev_params.segment_id;
		pd->dev.ecam_addr = pdev_params.ecam_addr;
		pd->dev.root_id = pdev_params.root_id;
		pd->dev.cert_slot_id = pdev_params.cert_id;
		pd->dev.ide_sid = pdev_params.ide_sid;
		pd->dev.rid_base = pdev_params.rid_base;
		pd->dev.rid_top = pdev_params.rid_top;
		pd->dev.ncoh_num_addr_range = pdev_params.ncoh_num_addr_range;
		(void)memcpy(&pd->dev.ncoh_addr_range,
			     &pdev_params.ncoh_addr_range,
			     (sizeof(struct rmi_address_range) *
			      pdev_params.ncoh_num_addr_range));

		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_INPUT;
	}

	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, (unsigned int)pdev_params.num_aux);

out_unmap_pdev_slot_buffer:
	/* Unmap the PDEV buffer from slot PDEV */
	buffer_unmap(pd);

	/*
	 * On success, unlock the PDEV granule and transition its state to
	 * GRANULE_STATE_PDEV, else unlock it and retain the state
	 * GRANULE_STATE_DELEGATED.
	 */
	if (smc_rc == RMI_SUCCESS) {
		granule_unlock_transition(g_pdev, GRANULE_STATE_PDEV);
	} else {
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
	}

out_restore_pdev_aux_granule_state:
	if (smc_rc != RMI_SUCCESS) {
		/*
		 * Transition all PDEV AUX granule states back to
		 * GRANULE_STATE_DELEGATED.
		 */
		pdev_restore_aux_granules_state(pdev_aux_granules,
				(unsigned int)pdev_params.num_aux, false);
	}

	return smc_rc;
}
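
/*
 * Minimal host-side sketch (an assumption, not RMM code) of how the
 * Non-secure host might drive the handler above. smc() stands for a generic
 * SMCCC conduit provided by the host, and the SMC_RMI_* FID names are assumed
 * to mirror smc-rmi.h. Error handling is elided.
 */
#if 0 /* illustrative only, not built */
static unsigned long host_pdev_create(unsigned long pdev_pa,
				      unsigned long params_pa,
				      const struct rmi_pdev_params *params)
{
	/* Delegate the PDEV granule and every aux granule first */
	(void)smc(SMC_RMI_GRANULE_DELEGATE, pdev_pa);
	for (unsigned int i = 0U; i < params->num_aux; i++) {
		(void)smc(SMC_RMI_GRANULE_DELEGATE, params->aux[i]);
	}

	/* Create the PDEV; on success it starts in RMI_PDEV_STATE_NEW */
	return smc(SMC_RMI_PDEV_CREATE, pdev_pa, params_pa);
}
#endif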

/*
 * smc_pdev_get_state
 *
 * Get the state of a PDEV.
 *
 * pdev_ptr	- PA of the PDEV
 * res		- SMC result
 */
void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res)
{
	struct granule *g_pdev;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		res->x[0] = SMC_NOT_SUPPORTED;
		return;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		goto out_err_input;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		goto out_err_input;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		goto out_err_input;
	}

	assert(pd->g_pdev == g_pdev);
	assert(pd->rmi_state <= RMI_PDEV_STATE_ERROR);
	res->x[0] = RMI_SUCCESS;
	res->x[1] = pd->rmi_state;

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return;

out_err_input:
	res->x[0] = RMI_ERROR_INPUT;
}

/*
 * Destroy a PDEV. The Host can reclaim PDEV resources using RMI PDEV_DESTROY
 * once the PDEV is in STOPPED state.
 *
 * pdev_ptr	- PA of the PDEV
 */
unsigned long smc_pdev_destroy(unsigned long pdev_ptr)
{
	int rc __unused;
	struct granule *g_pdev;
	void *aux_mapped_addr;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Lock the PDEV granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		return RMI_ERROR_INPUT;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		return RMI_ERROR_INPUT;
	}

	if (pd->rmi_state != RMI_PDEV_STATE_STOPPED) {
		buffer_unmap(pd);
		granule_unlock(g_pdev);
		return RMI_ERROR_DEVICE;
	}

	/* Map the PDEV aux granules, including the PDEV heap */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pd->g_aux, pd->num_aux);
	assert(aux_mapped_addr != NULL);

	/* Deinit the DSM context state */
	rc = (int)app_run(&pd->da_app_data, DEVICE_ASSIGN_APP_FUNC_ID_DEINIT,
			  0, 0, 0, 0);
	assert(rc == DEV_ASSIGN_STATUS_SUCCESS);

	/* Unmap all PDEV aux granules and the heap */
	buffer_pdev_aux_unmap(aux_mapped_addr, pd->num_aux);

	/*
	 * Scrub the PDEV AUX granules and transition their state from
	 * PDEV_AUX back to DELEGATED.
	 */
	pdev_restore_aux_granules_state(pd->g_aux, pd->num_aux, true);

	/* Scrub the PDEV granule and move it from PDEV to DELEGATED state */
	granule_memzero_mapped(pd);
	buffer_unmap(pd);

	granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);

	return RMI_SUCCESS;
}
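
/*
 * Matching host-side sketch (an assumption, not RMM code): reclaiming PDEV
 * resources once the PDEV has reached RMI_PDEV_STATE_STOPPED. After a
 * successful PDEV_DESTROY, the PDEV and aux granules are back in DELEGATED
 * state and can be undelegated for reuse by the Non-secure world. As above,
 * smc() and the SMC_RMI_* FID names are assumed host-side helpers.
 */
#if 0 /* illustrative only, not built */
static unsigned long host_pdev_reclaim(unsigned long pdev_pa,
				       unsigned long aux_pa[],
				       unsigned int num_aux)
{
	unsigned long ret = smc(SMC_RMI_PDEV_DESTROY, pdev_pa);

	if (ret == RMI_SUCCESS) {
		/* Granules are back in DELEGATED state; undelegate them */
		(void)smc(SMC_RMI_GRANULE_UNDELEGATE, pdev_pa);
		for (unsigned int i = 0U; i < num_aux; i++) {
			(void)smc(SMC_RMI_GRANULE_UNDELEGATE, aux_pa[i]);
		}
	}
	return ret;
}
#endif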