/*
 * Copyright 2021 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/check.h"
#include "hf/fdt.h"
#include "hf/fdt_handler.h"
#include "hf/ffa.h"
#include "hf/memiter.h"
#include "hf/mm.h"
#include "hf/std.h"
#include "hf/stdout.h"
#include "hf/string.h"

#include "vmapi/hf/call.h"

#include "../msr.h"
#include "test/hftest.h"
#include "test/hftest_impl.h"
#include "test/vmapi/arch/exception_handler.h"
#include "test/vmapi/ffa.h"

extern struct hftest_test hftest_begin[];
extern struct hftest_test hftest_end[];

static struct hftest_context global_context;

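/*
 * Stacks for secondary execution contexts: one PAGE_SIZE stack per vCPU,
 * handed out by hftest_get_secondary_ec_stack() using the vCPU index.
 */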
static alignas(PAGE_SIZE) uint8_t secondary_ec_stack[MAX_CPUS][PAGE_SIZE];

uint8_t *hftest_get_secondary_ec_stack(size_t id)
{
	assert(id < MAX_CPUS);
	return secondary_ec_stack[id];
}

struct hftest_context *hftest_get_context(void)
{
	return &global_context;
}

static bool uint32list_has_next(const struct memiter *list)
{
	return memiter_size(list) > 0;
}

static void uint32list_get_next(struct memiter *list, uint32_t *out)
{
	uint64_t num;

	CHECK(uint32list_has_next(list));
	if (!fdt_parse_number(list, sizeof(uint32_t), &num)) {
		return;
	}

	*out = (uint32_t)num;
}

[[noreturn]] void abort(void)
{
	HFTEST_LOG("Service contained failures.");
	/* Cause a fault, as a secondary/SP can't power down the machine. */
	*((volatile uint8_t *)1) = 1;

	/* This should never be reached, but to make the compiler happy... */
	for (;;) {
	}
}

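/*
 * Test services live in the [hftest_begin, hftest_end) array emitted by the
 * hftest framework's registration macros (presumably TEST_SERVICE() in the
 * hftest headers); entries of kind HFTEST_KIND_SERVICE are matched by name
 * below.
 */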
/** Find the service with the name passed in the arguments. */
static hftest_test_fn find_service(struct memiter *args)
{
	struct memiter service_name;
	struct hftest_test *test;

	if (!memiter_parse_str(args, &service_name)) {
		return NULL;
	}

	for (test = hftest_begin; test < hftest_end; ++test) {
		if (test->kind == HFTEST_KIND_SERVICE &&
		    memiter_iseq(&service_name, test->name)) {
			return test->fn;
		}
	}

	return NULL;
}

void hftest_context_init(struct hftest_context *ctx, void *send, void *recv)
{
	memset_s(ctx, sizeof(*ctx), 0, sizeof(*ctx));
	ctx->abort = abort;
	ctx->send = send;
	ctx->recv = recv;
}

/*
 * Parse the FF-A partition's manifest.
 * This function assumes the passed 'fdt' has been initialized.
 * TODO: Parse other fields as needed.
 */
void hftest_parse_ffa_manifest(struct hftest_context *ctx, struct fdt *fdt)
{
	struct fdt_node root;
	struct fdt_node ffa_node;
	struct string mem_region_node_name = STRING_INIT("memory-regions");
	struct string dev_region_node_name = STRING_INIT("device-regions");
	struct memiter uuid;
	struct memiter description;
	uint32_t uuid_word = 0;
	uint16_t j = 0;
	uint16_t i = 0;
	uint64_t number;

	CHECK(ctx != NULL);
	CHECK(fdt != NULL);

	ASSERT_TRUE(fdt_find_node(fdt, "/", &root));
	EXPECT_TRUE(fdt_is_compatible(&root, "arm,ffa-manifest-1.0"));
	ASSERT_TRUE(fdt_read_number(&root, "load-address",
				    &ctx->partition_manifest.load_addr));
	EXPECT_TRUE(fdt_read_number(&root, "ffa-version", &number));
	ctx->partition_manifest.ffa_version = number;

	EXPECT_TRUE(fdt_read_number(&root, "execution-ctx-count", &number));
	ctx->partition_manifest.execution_ctx_count = (uint16_t)number;

	EXPECT_TRUE(fdt_read_number(&root, "exception-level", &number));
	ctx->partition_manifest.run_time_el = (uint16_t)number;

	EXPECT_TRUE(fdt_read_property(&root, "uuid", &uuid));

	/* Parse UUIDs and populate uuid count. */
	while (uint32list_has_next(&uuid) && j < PARTITION_MAX_UUIDS) {
		while (uint32list_has_next(&uuid) && i < 4) {
			uint32list_get_next(&uuid, &uuid_word);
			ctx->partition_manifest.uuids[j].uuid[i] = uuid_word;
			i++;
		}

		EXPECT_FALSE(
			ffa_uuid_is_null(&ctx->partition_manifest.uuids[j]));

		dlog_verbose(" UUID %#x-%x-%x-%x\n",
			     ctx->partition_manifest.uuids[j].uuid[0],
			     ctx->partition_manifest.uuids[j].uuid[1],
			     ctx->partition_manifest.uuids[j].uuid[2],
			     ctx->partition_manifest.uuids[j].uuid[3]);
		j++;
		i = 0;
	}

	ctx->partition_manifest.uuid_count = j;

	ffa_node = root;

	/* Look for the memory region node. */
	if (fdt_find_child(&ffa_node, &mem_region_node_name) &&
	    fdt_first_child(&ffa_node)) {
		uint32_t mem_count = 0;

		do {
			struct memory_region *cur_region =
				&ctx->partition_manifest.mem_regions[mem_count];
			EXPECT_TRUE(fdt_read_number(&ffa_node, "pages-count",
						    &number));
			cur_region->page_count = (uint32_t)number;

			if (!fdt_read_number(&ffa_node, "base-address",
					     &cur_region->base_address)) {
				EXPECT_TRUE(fdt_read_number(
					&ffa_node,
					"load-address-relative-offset",
					&number));
				cur_region->base_address =
					ctx->partition_manifest.load_addr +
					number;
				cur_region->is_relative = true;
			}

			if (fdt_read_property(&ffa_node, "description",
					      &description)) {
				EXPECT_EQ(string_init(&cur_region->description,
						      &description),
					  STRING_SUCCESS);
			}

			EXPECT_TRUE(fdt_read_number(&ffa_node, "attributes",
						    &number));
			cur_region->attributes = (uint32_t)number;
			mem_count++;
		} while (fdt_next_sibling(&ffa_node));

		assert(mem_count < PARTITION_MAX_MEMORY_REGIONS);

		ctx->partition_manifest.mem_region_count = mem_count;
	}

	ffa_node = root;

	/* Look for the device region node. */
	if (fdt_find_child(&ffa_node, &dev_region_node_name) &&
	    fdt_first_child(&ffa_node)) {
		uint32_t dev_region_count = 0;

		do {
			struct device_region *cur_region =
				&ctx->partition_manifest
					 .dev_regions[dev_region_count];
			EXPECT_TRUE(fdt_read_number(&ffa_node, "pages-count",
						    &number));
			cur_region->page_count = (uint32_t)number;

			if (!fdt_read_number(&ffa_node, "base-address",
					     &cur_region->base_address)) {
				EXPECT_TRUE(fdt_read_number(
					&ffa_node,
					"load-address-relative-offset",
					&number));
				cur_region->base_address =
					ctx->partition_manifest.load_addr +
					number;
			}

			EXPECT_TRUE(fdt_read_number(&ffa_node, "attributes",
						    &number));
			cur_region->attributes = (uint32_t)number;
			dev_region_count++;
		} while (fdt_next_sibling(&ffa_node));

		assert(dev_region_count < PARTITION_MAX_DEVICE_REGIONS);

		ctx->partition_manifest.dev_region_count = dev_region_count;
	}

	ctx->is_ffa_manifest_parsed = true;
}
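
/*
 * For illustration only: a minimal sketch of a "memory-regions" entry that
 * hftest_parse_ffa_manifest() above can consume. The node name and property
 * values are made up, not taken from a real manifest:
 *
 *	memory-regions {
 *		test-memory {
 *			description = "test-memory";
 *			pages-count = <1>;
 *			base-address = <0x0 0x7200000>;
 *			attributes = <0x3>;
 *		};
 *	};
 */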

void hftest_service_set_up(struct hftest_context *ctx, struct fdt *fdt)
{
	struct fdt_node node;
	struct hftest_test *hftest_info;

	ASSERT_TRUE(fdt_find_node(fdt, "/", &node));

	if (!fdt_find_child(&node, &(STRING_INIT("hftest-service-setup")))) {
		return;
	}

	EXPECT_TRUE(fdt_is_compatible(&node, "arm,hftest"));

	for (hftest_info = hftest_begin; hftest_info < hftest_end;
	     ++hftest_info) {
		struct memiter data;
		if (hftest_info->kind != HFTEST_KIND_SERVICE_SET_UP) {
			continue;
		}
		if (fdt_read_property(&node, hftest_info->name, &data)) {
			HFTEST_LOG("Running service_setup: %s\n",
				   hftest_info->name);
			hftest_info->fn();
			if (ctx->failures) {
				HFTEST_LOG_FAILURE();
				HFTEST_LOG(HFTEST_LOG_INDENT
					   "%s service_setup failed\n",
					   hftest_info->name);
				abort();
			}
		} else {
			HFTEST_LOG("Skipping service_setup: %s\n",
				   hftest_info->name);
		}
	}
}
280
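/*
 * Illustrative sketch of the "hftest-service-setup" node consumed by
 * hftest_service_set_up() above; each set-up service is named as an empty
 * property (the service name below is made up):
 *
 *	hftest-service-setup {
 *		compatible = "arm,hftest";
 *		my-service-set-up;
 *	};
 */

/*
 * Entry point of a test service partition: set up the mailbox and context on
 * the boot vCPU, parse the FDT (VMs) or the FF-A partition manifest (SPs),
 * run any registered set-up services, then wait for the test driver to name
 * the service to run via an indirect message, look it up and execute it.
 */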
[[noreturn]] void hftest_service_main(const void *fdt_ptr)
{
	struct hftest_context *ctx;
	struct memiter args;
	hftest_test_fn service;
	struct ffa_value ret;
	struct fdt fdt;
	const ffa_id_t own_id = hf_vm_get_id();
	ffa_notifications_bitmap_t bitmap;
	const struct ffa_partition_msg *message;
	uint32_t vcpu = get_current_vcpu_index();

	ctx = hftest_get_context();

	/* If boot vCPU, set up mailbox and initialize context abort function. */
	if (vcpu == 0) {
		struct mailbox_buffers mb;
		mb = set_up_mailbox();
		hftest_context_init(ctx, mb.send, mb.recv);
	}

	if (!fdt_struct_from_ptr(fdt_ptr, &fdt)) {
		HFTEST_LOG(HFTEST_LOG_INDENT "Unable to access the FDT");
		abort();
	}

	/*
	 * The memory size argument is to be used only by VMs. It is part of
	 * the DT provided by the Hypervisor. SPs expect to receive their
	 * FF-A manifest, which doesn't have a memory size field.
	 */
	if (ffa_is_vm_id(own_id) &&
	    !fdt_get_memory_size(&fdt, &ctx->memory_size)) {
		HFTEST_LOG_FAILURE();
		HFTEST_LOG(HFTEST_LOG_INDENT
			   "No entry in the FDT on memory size details");
		abort();
	} else if (!ffa_is_vm_id(own_id)) {
		/*
		 * It is a secure partition. We are currently using the
		 * partition manifest for the SP.
		 */
		hftest_parse_ffa_manifest(ctx, &fdt);
		stdout_init(ctx->partition_manifest.ffa_version);

		/* TODO: Determine memory size referring to the SP Pkg. */
		ctx->memory_size = 1048576;
	}

	/* If boot vCPU, it means it is running in RTM_INIT. */
	if (vcpu == 0) {
		hftest_service_set_up(ctx, &fdt);
	}

	/* Receive the name of the service to run. */
	ret = ffa_msg_wait();
	EXPECT_EQ(ret.func, FFA_RUN_32);

	message = (struct ffa_partition_msg *)SERVICE_RECV_BUFFER();

	/*
	 * Expect to wake up with an indirect message naming the next service
	 * to be executed.
	 */
	ret = ffa_notification_get(own_id, vcpu,
				   FFA_NOTIFICATION_FLAG_BITMAP_SPM |
				   FFA_NOTIFICATION_FLAG_BITMAP_HYP);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);
	bitmap = ffa_notification_get_from_framework(ret);
	ASSERT_TRUE(is_ffa_spm_buffer_full_notification(bitmap) ||
		    is_ffa_hyp_buffer_full_notification(bitmap));
	ASSERT_EQ(own_id, message->header.receiver);

	if (ctx->is_ffa_manifest_parsed &&
	    ctx->partition_manifest.run_time_el == S_EL1) {
		ASSERT_EQ(hf_interrupt_get(), HF_NOTIFICATION_PENDING_INTID);
	}

	memiter_init(&args, ffa_partition_msg_payload_const(message),
		     message->header.size);

	/* Find service handler. */
	service = find_service(&args);
	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	/* Check the service was found. */
	if (service == NULL) {
		HFTEST_LOG_FAILURE();
		HFTEST_LOG(HFTEST_LOG_INDENT
			   "Unable to find requested service");
		abort();
	}

	/* Pause so that, next time cycles are given, the service will run. */
	ffa_yield();

	/* Let the service run. */
	service();

	/* Cleanly handle it if the service returns. */
	if (ctx->failures) {
		abort();
	}

	for (;;) {
		/* Hang if the service returns. */
	}
}

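/*
 * Accessors for the direct request source ID stored in the hftest context; a
 * service records the sender of a direct request here so that it can be
 * retrieved later (e.g. when replying).
 */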
ffa_id_t hftest_get_dir_req_source_id(void)
{
	struct hftest_context *ctx = hftest_get_context();
	return ctx->dir_req_source_id;
}

void hftest_set_dir_req_source_id(ffa_id_t id)
{
	struct hftest_context *ctx = hftest_get_context();
	ctx->dir_req_source_id = id;
}

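/*
 * Identity-map the MMIO regions declared in the partition's manifest so that
 * a test service can access its devices; must be called after the manifest
 * has been parsed (see the check below).
 */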
void hftest_map_device_regions(struct hftest_context *ctx)
{
	struct device_region *dev_region;
	uint32_t dev_region_count;

	/*
	 * The running partition must have received and parsed its own
	 * partition manifest by now.
	 */
	if (!ctx || !ctx->is_ffa_manifest_parsed) {
		panic("Partition manifest not parsed.\n");
	}

	dev_region_count = ctx->partition_manifest.dev_region_count;

	/* Map the MMIO address space of the devices. */
	for (uint32_t i = 0; i < dev_region_count; i++) {
		dev_region = &ctx->partition_manifest.dev_regions[i];

		hftest_mm_identity_map(
			// NOLINTNEXTLINE(performance-no-int-to-ptr)
			(const void *)dev_region->base_address,
			dev_region->page_count * PAGE_SIZE,
			dev_region->attributes);
	}
}