/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <arch.h>
#include <arch_features.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <dev.h>
#include <dev_assign_app.h>
#include <feature.h>
#include <granule.h>
#include <sizes.h>
#include <smc-handler.h>
#include <smc-rmi.h>
#include <string.h>
#include <utils_def.h>

/*
 * This function is only invoked when PDEV creation fails or when a PDEV is
 * being destroyed. The PDEV is therefore not in use when this function is
 * called, so no lock is acquired before invoking it.
 */
static void pdev_restore_aux_granules_state(struct granule *pdev_aux[],
					    unsigned int cnt, bool scrub)
{
	for (unsigned int i = 0U; i < cnt; i++) {
		struct granule *g_pdev_aux = pdev_aux[i];

		granule_lock(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		if (scrub) {
			buffer_granule_memzero(g_pdev_aux,
				(enum buffer_slot)((unsigned int)SLOT_PDEV_AUX0 + i));
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_DELEGATED);
	}
}

/*
 * TODO:
 * Validate device-specific PDEV parameters by traversing all previously
 * created PDEVs and checking them against the current PDEV parameters. This
 * implements RmiPdevParamsIsValid of the RMM specification.
 */
static int validate_rmi_pdev_params(struct rmi_pdev_params *pd_params)
{
	(void)pd_params;

	/*
	 * Check that the device identifier, Root Port identifier, IDE stream
	 * identifier and RID range are valid.
	 */

	/*
	 * Check that the device identifier is not equal to the device
	 * identifier of another PDEV.
	 */

	/*
	 * Check that the RID range does not overlap the RID range of another
	 * PDEV.
	 */

	/*
	 * Check that every address range falls within an MMIO range permitted
	 * by the system.
	 */

	/*
	 * Check that none of the address ranges overlaps another address
	 * range for this PDEV.
	 */

	return 0;
}
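
/*
 * Illustrative sketch only, not part of the current implementation: the RID
 * range check listed above needs a range-overlap predicate of roughly this
 * shape, assuming inclusive [base, top] bounds. The traversal of existing
 * PDEVs that would drive it is not implemented yet, so this helper has no
 * caller.
 */
static inline bool rid_ranges_overlap(unsigned long base_a, unsigned long top_a,
				      unsigned long base_b, unsigned long top_b)
{
	/* Two inclusive ranges overlap unless one ends before the other starts */
	return (base_a <= top_b) && (base_b <= top_a);
}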

/*
 * smc_pdev_create
 *
 * pdev_ptr		- PA of the PDEV
 * pdev_params_ptr	- PA of PDEV parameters
 */
unsigned long smc_pdev_create(unsigned long pdev_ptr,
			      unsigned long pdev_params_ptr)
{
	struct granule *g_pdev;
	struct granule *g_pdev_params;
	struct pdev *pd;
	struct rmi_pdev_params pdev_params; /* this consumes 4k of stack */
	struct granule *pdev_aux_granules[PDEV_PARAM_AUX_GRANULES_MAX];
	bool ns_access_ok;
	void *aux_mapped_addr;
	struct dev_assign_params dparams;
	unsigned long smc_rc;
	int rc;

	if (!is_rmi_feat_da_enabled()) {
		return SMC_NOT_SUPPORTED;
	}

	if (!GRANULE_ALIGNED(pdev_ptr) ||
	    !GRANULE_ALIGNED(pdev_params_ptr)) {
		return RMI_ERROR_INPUT;
	}

	/* Map and copy PDEV parameters */
	g_pdev_params = find_granule(pdev_params_ptr);
	if ((g_pdev_params == NULL) ||
	    (granule_unlocked_state(g_pdev_params) != GRANULE_STATE_NS)) {
		return RMI_ERROR_INPUT;
	}

	ns_access_ok = ns_buffer_read(SLOT_NS, g_pdev_params, 0U,
				      sizeof(struct rmi_pdev_params),
				      &pdev_params);
	if (!ns_access_ok) {
		return RMI_ERROR_INPUT;
	}

	/*
	 * Validate RmiPdevFlags. RMM supports PCIe off-chip devices,
	 * represented by flags: SPDM=true, IDE=true, COHERENT=false,
	 * P2P=false.
	 */
	/* coverity[uninit_use:SUPPRESS] */
	if ((EXTRACT(RMI_PDEV_FLAGS_SPDM, pdev_params.flags) !=
	     RMI_PDEV_SPDM_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) !=
	     RMI_PDEV_IDE_TRUE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_COHERENT, pdev_params.flags) !=
	     RMI_PDEV_COHERENT_FALSE) ||
	    (EXTRACT(RMI_PDEV_FLAGS_P2P, pdev_params.flags) !=
	     RMI_PDEV_P2P_FALSE)) {
		return RMI_ERROR_NOT_SUPPORTED;
	}

	/* Validate PDEV parameters that are not specific to a device class. */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.num_aux > PDEV_PARAM_AUX_GRANULES_MAX) ||
	    (pdev_params.ncoh_num_addr_range > PDEV_PARAM_NCOH_ADDR_RANGE_MAX)) {
		return RMI_ERROR_INPUT;
	}

	/* Validate hash algorithm */
	/* coverity[uninit_use:SUPPRESS] */
	if ((pdev_params.hash_algo != RMI_HASH_SHA_256) &&
	    (pdev_params.hash_algo != RMI_HASH_SHA_512)) {
		return RMI_ERROR_INPUT;
	}

	/* cppcheck-suppress knownConditionTrueFalse */
	if (validate_rmi_pdev_params(&pdev_params) != 0) {
		return RMI_ERROR_INPUT;
	}

	/* Loop through pdev_aux_granules and transition them */
	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		struct granule *g_pdev_aux;

		/* coverity[uninit_use_in_call:SUPPRESS] */
		g_pdev_aux = find_lock_granule(pdev_params.aux[i],
					       GRANULE_STATE_DELEGATED);
		if (g_pdev_aux == NULL) {
			pdev_restore_aux_granules_state(pdev_aux_granules, i,
							false);
			return RMI_ERROR_INPUT;
		}
		granule_unlock_transition(g_pdev_aux, GRANULE_STATE_PDEV_AUX);
		pdev_aux_granules[i] = g_pdev_aux;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_DELEGATED);
	if (g_pdev == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_restore_pdev_aux_granule_state;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
		goto out_restore_pdev_aux_granule_state;
	}

	/* Map all PDEV aux granules to slots starting at SLOT_PDEV_AUX0 */
	aux_mapped_addr = buffer_pdev_aux_granules_map(pdev_aux_granules,
					(unsigned int)pdev_params.num_aux);
	if (aux_mapped_addr == NULL) {
		smc_rc = RMI_ERROR_INPUT;
		goto out_unmap_pdev_slot_buffer;
	}

	/* Call init routine to initialize device class specific state */
	dparams.dev_handle = (void *)pd;
	dparams.rmi_hash_algo = pdev_params.hash_algo;
	dparams.cert_slot_id = (uint8_t)pdev_params.cert_id;

	if (EXTRACT(RMI_PDEV_FLAGS_IDE, pdev_params.flags) ==
	    RMI_PDEV_IDE_TRUE) {
		dparams.has_ide = true;
		dparams.ecam_addr = pdev_params.ecam_addr;
		dparams.rp_id = pdev_params.root_id;
		dparams.ide_sid = pdev_params.ide_sid;
	} else {
		dparams.has_ide = false;
	}

	/* Use the PDEV aux pages for the DA app */
	uintptr_t granule_pas[PDEV_PARAM_AUX_GRANULES_MAX];

	for (unsigned int i = 0U; i < pdev_params.num_aux; i++) {
		granule_pas[i] = granule_addr(pdev_aux_granules[i]);
	}

	rc = dev_assign_app_init(&pd->da_app_data, granule_pas,
				 pdev_params.num_aux, aux_mapped_addr,
				 &dparams);

	if (rc == DEV_ASSIGN_STATUS_SUCCESS) {
		/* Initialize PDEV */
		pd->g_pdev = g_pdev;
		pd->rmi_state = RMI_PDEV_STATE_NEW;
		pd->rmi_flags = pdev_params.flags;
		pd->num_vdevs = 0;
		pd->rmi_hash_algo = pdev_params.hash_algo;
		pd->num_aux = (unsigned int)pdev_params.num_aux;
		(void)memcpy((void *)pd->g_aux, (void *)pdev_aux_granules,
			     pdev_params.num_aux * sizeof(struct granule *));

		/* Initialize PDEV communication state */
		pd->dev_comm_state = DEV_COMM_PENDING;

		/* Initialize PDEV PCIe device */
		pd->dev.bdf = pdev_params.pdev_id;
		pd->dev.segment_id = pdev_params.segment_id;
		pd->dev.ecam_addr = pdev_params.ecam_addr;
		pd->dev.root_id = pdev_params.root_id;
		pd->dev.cert_slot_id = pdev_params.cert_id;
		pd->dev.ide_sid = pdev_params.ide_sid;
		pd->dev.rid_base = pdev_params.rid_base;
		pd->dev.rid_top = pdev_params.rid_top;
		pd->dev.ncoh_num_addr_range = pdev_params.ncoh_num_addr_range;
		(void)memcpy(&pd->dev.ncoh_addr_range,
			     &pdev_params.ncoh_addr_range,
			     (sizeof(struct rmi_address_range) *
			      pdev_params.ncoh_num_addr_range));

		smc_rc = RMI_SUCCESS;
	} else {
		smc_rc = RMI_ERROR_INPUT;
	}
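
	/*
	 * From here the function unwinds in reverse order of setup: unmap the
	 * aux slot buffers, unmap the PDEV slot buffer, unlock the PDEV
	 * granule and, on error, restore the aux granule states.
	 */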
	/* Unmap all PDEV aux granules */
	buffer_pdev_aux_unmap(aux_mapped_addr, (unsigned int)pdev_params.num_aux);

out_unmap_pdev_slot_buffer:
	/* Unmap PDEV buffer from slot PDEV */
	buffer_unmap(pd);

	/*
	 * On success, unlock and transition the PDEV granule state to
	 * GRANULE_STATE_PDEV, else unlock and retain the state
	 * GRANULE_STATE_DELEGATED.
	 */
	if (smc_rc == RMI_SUCCESS) {
		granule_unlock_transition(g_pdev, GRANULE_STATE_PDEV);
	} else {
		granule_unlock_transition(g_pdev, GRANULE_STATE_DELEGATED);
	}

out_restore_pdev_aux_granule_state:
	if (smc_rc != RMI_SUCCESS) {
		/*
		 * Transition all PDEV aux granule states back to
		 * GRANULE_STATE_DELEGATED.
		 */
		pdev_restore_aux_granules_state(pdev_aux_granules,
				(unsigned int)pdev_params.num_aux, false);
	}

	return smc_rc;
}

/*
 * smc_pdev_get_state
 *
 * Get state of a PDEV.
 *
 * pdev_ptr	- PA of the PDEV
 * res		- SMC result
 */
void smc_pdev_get_state(unsigned long pdev_ptr, struct smc_result *res)
{
	struct granule *g_pdev;
	struct pdev *pd;

	if (!is_rmi_feat_da_enabled()) {
		res->x[0] = SMC_NOT_SUPPORTED;
		return;
	}

	if (!GRANULE_ALIGNED(pdev_ptr)) {
		goto out_err_input;
	}

	/* Lock pdev granule and map it */
	g_pdev = find_lock_granule(pdev_ptr, GRANULE_STATE_PDEV);
	if (g_pdev == NULL) {
		goto out_err_input;
	}

	pd = buffer_granule_map(g_pdev, SLOT_PDEV);
	if (pd == NULL) {
		granule_unlock(g_pdev);
		goto out_err_input;
	}

	assert(pd->g_pdev == g_pdev);
	assert(pd->rmi_state <= RMI_PDEV_STATE_ERROR);
	res->x[0] = RMI_SUCCESS;
	res->x[1] = pd->rmi_state;

	buffer_unmap(pd);
	granule_unlock(g_pdev);

	return;

out_err_input:
	res->x[0] = RMI_ERROR_INPUT;
}