/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include "hf/check.h"
#include "hf/mm.h"
#include "hf/static_assert.h"

#include "vmapi/hf/call.h"

#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;

struct mailbox_buffers set_up_mailbox(void)
{
	ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	return (struct mailbox_buffers){
		.send = send_page,
		.recv = recv_page,
	};
}
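
/*
 * Illustrative sketch of typical usage from a test. SERVICE_VM1 and the
 * message buffer are assumptions of this example, not defined in this file:
 *
 *	struct mailbox_buffers mb = set_up_mailbox();
 *	memcpy_s(mb.send, HF_MAILBOX_SIZE, message, sizeof(message));
 *	EXPECT_EQ(ffa_msg_send(hf_vm_get_id(), SERVICE_VM1,
 *			       sizeof(message), 0).func,
 *		  FFA_SUCCESS_32);
 */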

/*
 * Helper function to send memory to a VM and then send it a message with the
 * retrieve request it needs to use to retrieve the memory.
 */
ffa_memory_handle_t send_memory_and_retrieve_request(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t send_flags,
	ffa_memory_region_flags_t retrieve_flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	struct ffa_value ret;
	const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE = 0xffffffffffffffff;
	ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
	ffa_memory_handle_t handle;
	uint32_t remaining_constituent_count;
	uint32_t sent_length;

	/* Send the first fragment of the memory. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, send_flags, send_data_access,
		send_instruction_access,
		share_func == FFA_MEM_SHARE_32 ? FFA_MEMORY_NORMAL_MEM
					       : FFA_MEMORY_NOT_SPECIFIED_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
		&total_length, &fragment_length);
	if (remaining_constituent_count == 0) {
		EXPECT_EQ(total_length, fragment_length);
	}
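	/* Invoke the memory send ABI selected by share_func. */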
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	sent_length = fragment_length;

	/* Send the remaining fragments. */
	while (remaining_constituent_count != 0) {
		dlog_verbose("%d constituents left to send.\n",
			     remaining_constituent_count);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
		if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
			fragment_handle = ffa_frag_handle(ret);
		} else {
			EXPECT_EQ(ffa_frag_handle(ret), fragment_handle);
		}
		EXPECT_EQ(ret.arg3, sent_length);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);

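		/*
		 * Build the next fragment, resuming from the constituents
		 * which haven't been sent yet.
		 */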
		remaining_constituent_count = ffa_memory_fragment_init(
			tx_buffer, HF_MAILBOX_SIZE,
			constituents + constituent_count -
				remaining_constituent_count,
			remaining_constituent_count, &fragment_length);

		ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
		sent_length += fragment_length;
	}

	EXPECT_EQ(sent_length, total_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	handle = ffa_mem_success_handle(ret);
	EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
		  FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);
	if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
		EXPECT_EQ(handle, fragment_handle);
	}

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, retrieve_flags,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_INNER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}
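
/*
 * Illustrative example of lending two pages to a service VM (a sketch only;
 * SERVICE_VM1, mb and the pages array are assumptions of this example):
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)pages, .page_count = 2},
 *	};
 *	handle = send_memory_and_retrieve_request(
 *		FFA_MEM_LEND_32, mb.send, hf_vm_get_id(), SERVICE_VM1,
 *		constituents, ARRAY_SIZE(constituents), 0, 0,
 *		FFA_DATA_ACCESS_RW, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *		FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
 */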

/*
 * Helper function to send memory to a VM and then send it a message with the
 * retrieve request it needs to use to retrieve the memory, forcing the memory
 * send to be split into at least two fragments even if the descriptor would
 * fit in one.
 */
ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
	uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
	ffa_vm_id_t recipient,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	uint32_t remaining_constituent_count;
	struct ffa_value ret;
	ffa_memory_handle_t handle;

	/*
	 * Initialise the full region descriptor, but only send everything
	 * except the last constituent in the first fragment.
	 */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access,
		share_func == FFA_MEM_SHARE_32 ? FFA_MEMORY_NORMAL_MEM
					       : FFA_MEMORY_NOT_SPECIFIED_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
		&total_length, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(total_length, fragment_length);
	/* Don't include the last constituent in the first fragment. */
	fragment_length -= sizeof(struct ffa_memory_region_constituent);
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyser happy. */
		return 0;
	}
	EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
	EXPECT_EQ(ret.arg3, fragment_length);
	/* Sender MBZ at virtual instance. */
	EXPECT_EQ(ffa_frag_sender(ret), 0);

	handle = ffa_frag_handle(ret);

	/* Send the last constituent in a separate fragment. */
	remaining_constituent_count = ffa_memory_fragment_init(
		tx_buffer, HF_MAILBOX_SIZE,
		&constituents[constituent_count - 1], 1, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	ret = ffa_mem_frag_tx(handle, fragment_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_mem_success_handle(ret), handle);

	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init(
		tx_buffer, handle, sender, recipient, 0, 0,
		retrieve_data_access, retrieve_instruction_access,
		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
		FFA_MEMORY_INNER_SHAREABLE);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
		  FFA_SUCCESS_32);

	return handle;
}

/*
 * Use the retrieve request from the receive buffer to retrieve a memory
 * region which has been sent to us. Copies all the fragments into the
 * provided buffer, if any, and checks that the total length of all fragments
 * is no more than `memory_region_max_size`. Returns the sender, and returns
 * the handle via the `handle` out-parameter.
 */
ffa_vm_id_t retrieve_memory_from_message(
	void *recv_buf, void *send_buf, struct ffa_value msg_ret,
	ffa_memory_handle_t *handle,
	struct ffa_memory_region *memory_region_ret,
	size_t memory_region_max_size)
{
	uint32_t msg_size;
	struct ffa_value ret;
	struct ffa_memory_region *memory_region;
	ffa_vm_id_t sender;
	struct ffa_memory_region *retrieve_request;
	ffa_memory_handle_t handle_;
	uint32_t fragment_length;
	uint32_t total_length;
	uint32_t fragment_offset;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_sender(msg_ret);

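	/* The message payload is the retrieve request descriptor. */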
	retrieve_request = (struct ffa_memory_region *)recv_buf;
	handle_ = retrieve_request->handle;
	if (handle != NULL) {
		*handle = handle_;
	}
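	/*
	 * Copy the retrieve request into the TX buffer and ask the hypervisor
	 * to retrieve the memory region.
	 */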
	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
	total_length = ret.arg1;
	fragment_length = ret.arg2;
	EXPECT_GE(fragment_length,
		  sizeof(struct ffa_memory_region) +
			  sizeof(struct ffa_memory_access) +
			  sizeof(struct ffa_composite_memory_region));
	EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
	EXPECT_LE(fragment_length, total_length);
	memory_region = (struct ffa_memory_region *)recv_buf;
	EXPECT_EQ(memory_region->receiver_count, 1);
	EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
		  hf_vm_get_id());

	/* Copy into the return buffer. */
	if (memory_region_ret != NULL) {
		memcpy_s(memory_region_ret, memory_region_max_size,
			 memory_region, fragment_length);
	}

	/*
	 * Release the RX buffer now that we have read everything we need from
	 * it.
	 */
	memory_region = NULL;
	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	/* Retrieve the remaining fragments. */
	fragment_offset = fragment_length;
	while (fragment_offset < total_length) {
		ret = ffa_mem_frag_rx(handle_, fragment_offset);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
		EXPECT_EQ(ffa_frag_handle(ret), handle_);
		/* Sender MBZ at virtual instance. */
		EXPECT_EQ(ffa_frag_sender(ret), 0);
		fragment_length = ret.arg3;
		EXPECT_GT(fragment_length, 0);
		ASSERT_LE(fragment_offset + fragment_length,
			  memory_region_max_size);
		if (memory_region_ret != NULL) {
			memcpy_s((uint8_t *)memory_region_ret +
					 fragment_offset,
				 memory_region_max_size - fragment_offset,
				 recv_buf, fragment_length);
		}
		fragment_offset += fragment_length;
		EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
	}
	EXPECT_EQ(fragment_offset, total_length);

	return sender;
}
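
/*
 * Illustrative receiver-side sketch (assumptions of this example: the service
 * waits for the retrieve request with ffa_msg_wait() and provides a
 * page-sized buffer for the retrieved descriptor):
 *
 *	static uint8_t retrieved[PAGE_SIZE];
 *	struct ffa_value ret = ffa_msg_wait();
 *	sender = retrieve_memory_from_message(
 *		mb.recv, mb.send, ret, &handle,
 *		(struct ffa_memory_region *)retrieved, sizeof(retrieved));
 */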

/*
 * Use the retrieve request from the receive buffer to retrieve a memory
 * region which has been sent to us, expecting it to fail with the given
 * error code. Returns the sender.
 */
ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
						     void *send_buf,
						     struct ffa_value msg_ret,
						     int32_t expected_error)
{
	uint32_t msg_size;
	struct ffa_value ret;
	ffa_vm_id_t sender;

	EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
	msg_size = ffa_msg_send_size(msg_ret);
	sender = ffa_sender(msg_ret);

	memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
	ffa_rx_release();
	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_FFA_ERROR(ret, expected_error);

	return sender;
}
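
/*
 * For example (a sketch; the expected error depends on the test scenario):
 *
 *	retrieve_memory_from_message_expect_fail(mb.recv, mb.send, ret,
 *						 FFA_DENIED);
 */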

ffa_vm_count_t get_ffa_partition_info(struct ffa_uuid *uuid,
				      struct ffa_partition_info *info,
				      size_t info_size)
{
	struct ffa_value ret;
	struct ffa_partition_info *ret_info = set_up_mailbox().recv;

	CHECK(uuid != NULL);
	CHECK(info != NULL);

	ffa_version(MAKE_FFA_VERSION(1, 1));

	ret = ffa_partition_info_get(uuid, 0);

	if (ffa_func_id(ret) != FFA_SUCCESS_32) {
		return 0;
	}

	if (ret.arg2 != 0) {
		size_t src_size = ret.arg2 * sizeof(struct ffa_partition_info);
		size_t dest_size =
			info_size * sizeof(struct ffa_partition_info);

		memcpy_s(info, dest_size, ret_info, src_size);
	}

	ffa_rx_release();

	return ret.arg2;
}
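
/*
 * Illustrative sketch of querying all partitions with the nil UUID (the
 * ffa_uuid_init helper and the 8-entry buffer are assumptions of this
 * example):
 *
 *	struct ffa_uuid uuid;
 *	struct ffa_partition_info info[8];
 *	ffa_uuid_init(0, 0, 0, 0, &uuid);
 *	ffa_vm_count_t count = get_ffa_partition_info(&uuid, info, 8);
 */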

/**
 * Dump the boot information passed to the partition.
 */
void dump_boot_info(struct ffa_boot_info_header *boot_info_header)
{
	struct ffa_boot_info_desc *boot_info_desc;

	if (boot_info_header == NULL) {
		HFTEST_LOG("SP doesn't have boot arguments!\n");
		return;
	}

	HFTEST_LOG("SP boot info (%x):", (uintptr_t)boot_info_header);
	HFTEST_LOG("  Signature: %x", boot_info_header->signature);
	HFTEST_LOG("  Version: %x", boot_info_header->version);
	HFTEST_LOG("  Blob Size: %u", boot_info_header->info_blob_size);
	HFTEST_LOG("  Descriptor Size: %u", boot_info_header->desc_size);
	HFTEST_LOG("  Descriptor Count: %u", boot_info_header->desc_count);

	boot_info_desc = boot_info_header->boot_info;

	if (boot_info_desc == NULL) {
		dlog_error("Boot info descriptor array is missing.\n");
		return;
	}
	for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
		HFTEST_LOG("  Type: %u", boot_info_desc[i].type);
		HFTEST_LOG("  Flags:");
		HFTEST_LOG("    Name Format: %x",
			   ffa_boot_info_name_format(&boot_info_desc[i]));
		HFTEST_LOG("    Content Format: %x",
			   ffa_boot_info_content_format(&boot_info_desc[i]));
		HFTEST_LOG("  Size: %u", boot_info_desc[i].size);
		HFTEST_LOG("  Value: %x", boot_info_desc[i].content);
	}
}

/**
 * Retrieve the boot info descriptor related to the provided type and type ID.
 */
struct ffa_boot_info_desc *get_boot_info_desc(
	struct ffa_boot_info_header *boot_info_header, uint8_t type,
	uint8_t type_id)
{
	struct ffa_boot_info_desc *boot_info_desc;

	assert(boot_info_header != NULL);

	ASSERT_EQ(boot_info_header->signature, 0xFFAU);
	ASSERT_EQ(boot_info_header->version, 0x10001U);
	ASSERT_EQ(boot_info_header->desc_size,
		  sizeof(struct ffa_boot_info_desc));
	ASSERT_EQ((uintptr_t)boot_info_header + boot_info_header->desc_offset,
		  (uintptr_t)boot_info_header->boot_info);

	boot_info_desc = boot_info_header->boot_info;

	for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
		if (ffa_boot_info_type_id(&boot_info_desc[i]) == type_id &&
		    ffa_boot_info_type(&boot_info_desc[i]) == type) {
			return &boot_info_desc[i];
		}
	}

	return NULL;
}
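
/*
 * For example, a partition might locate its FDT blob like this (a sketch;
 * FFA_BOOT_INFO_TYPE_STD and FFA_BOOT_INFO_TYPE_ID_FDT are the standard
 * FF-A v1.1 encodings, assumed to be defined elsewhere):
 *
 *	struct ffa_boot_info_desc *fdt_info = get_boot_info_desc(
 *		boot_info_header, FFA_BOOT_INFO_TYPE_STD,
 *		FFA_BOOT_INFO_TYPE_ID_FDT);
 */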