Max Shvetsov | 103e056 | 2021-02-04 16:58:31 +0000 | [diff] [blame] | 1 | /* |
Shruti Gupta | 38133fa | 2023-04-19 17:00:38 +0100 | [diff] [blame] | 2 | * Copyright (c) 2021-2023, Arm Limited. All rights reserved. |
Max Shvetsov | 103e056 | 2021-02-04 16:58:31 +0000 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: BSD-3-Clause |
| 5 | */ |
| 6 | |
| 7 | #ifndef SPM_COMMON_H |
| 8 | #define SPM_COMMON_H |
| 9 | |
J-Alves | 79c08f1 | 2021-10-27 15:15:16 +0100 | [diff] [blame] | 10 | #include <plat/common/platform.h> |
| 11 | |
Max Shvetsov | 103e056 | 2021-02-04 16:58:31 +0000 | [diff] [blame] | 12 | #include <stdint.h> |
| 13 | #include <string.h> |
| 14 | |
J-Alves | 79c08f1 | 2021-10-27 15:15:16 +0100 | [diff] [blame] | 15 | #include <ffa_helpers.h> |
| 16 | |
Olivier Deprez | 569be40 | 2022-07-08 10:24:39 +0200 | [diff] [blame] | 17 | #include <lib/extensions/sve.h> |
| 18 | |
/* Hypervisor ID at physical FFA instance */
#define HYP_ID			(0)
/* SPMC ID */
#define SPMC_ID			U(0x8000)

/* ID for the first Secure Partition. */
#define SPM_VM_ID_FIRST			SP_ID(1)

/* INTID for the virtual timer interrupt. */
#define TIMER_VIRTUAL_INTID		U(3)

/* INTID for the managed exit virtual interrupt. */
#define MANAGED_EXIT_INTERRUPT_ID	U(4)

/* INTID for the notification pending interrupt. */
#define NOTIFICATION_PENDING_INTERRUPT_INTID	U(5)

/* Interrupt used for testing extended SPI handling. */
#define IRQ_ESPI_TEST_INTID		U(5000)
| 37 | |
/** IRQ/FIQ pin used for signaling a virtual interrupt. */
enum interrupt_pin {
	INTERRUPT_TYPE_IRQ = 0, /* Deliver the virtual interrupt on the IRQ pin. */
	INTERRUPT_TYPE_FIQ = 1, /* Deliver the virtual interrupt on the FIQ pin. */
};
| 43 | |
/*
 * Bit 15 of an FF-A ID indicates whether the partition executes in the
 * normal world, in which case it is a Virtual Machine (VM), or in the
 * secure world, in which case it is a Secure Partition (SP).
 *
 * If bit 15 is set the partition is an SP; if bit 15 is clear the
 * partition is a VM.
 */
#define SP_ID_MASK	U(1 << 15)
#define SP_ID(x)	((x) | SP_ID_MASK)
/* Arguments are fully parenthesized to avoid operator-precedence surprises
 * when callers pass expressions such as VM_ID(a | b). */
#define VM_ID(x)	((x) & ~SP_ID_MASK)
#define IS_SP_ID(x)	(((x) & SP_ID_MASK) != 0U)

/* All-zeroes FF-A UUID, used as a wildcard/"no UUID" value. */
#define NULL_UUID	(const struct ffa_uuid) { .uuid = {0} }
| 58 | |
/* Descriptor for a single FFA_FEATURES test case. */
struct ffa_features_test {
	const char *test_name;		/* Human-readable name of the test case. */
	unsigned int feature;		/* Feature/function ID passed to FFA_FEATURES. */
	unsigned int expected_ret;	/* Function ID expected in the return value. */
	unsigned int param;		/* Additional input parameter for the query. */
	unsigned int version_added;	/* FF-A version this feature first appears in —
					 * presumably used to skip the case on older
					 * versions; confirm against the test driver. */
};
Max Shvetsov | 103e056 | 2021-02-04 16:58:31 +0000 | [diff] [blame] | 66 | |
| 67 | struct mailbox_buffers { |
| 68 | void *recv; |
| 69 | void *send; |
| 70 | }; |
| 71 | |
/*
 * Declares a static, PAGE_SIZE-aligned pair of RX/TX buffers and stores
 * their addresses in 'mb_name' (a struct mailbox_buffers).
 *
 * Each expansion lives in its own do/while block scope, so every use of
 * this macro gets a distinct static buffer pair even within one function.
 * 'buffers_size' must be known at compile time (it sizes a static array).
 */
#define CONFIGURE_MAILBOX(mb_name, buffers_size) \
	do { \
	/* Declare RX/TX buffers at virtual FF-A instance */ \
	static struct { \
		uint8_t rx[buffers_size]; \
		uint8_t tx[buffers_size]; \
	} __aligned(PAGE_SIZE) mb_buffers; \
	mb_name.recv = (void *)mb_buffers.rx; \
	mb_name.send = (void *)mb_buffers.tx; \
	} while (false)
| 82 | |
/*
 * Declares RX/TX buffers via CONFIGURE_MAILBOX, then registers them with
 * the SPM through FFA_RXTX_MAP. The FF-A return value is stored in
 * 'smc_ret' so the caller can check for FFA_ERROR.
 * 'buffers_size' is expected to be a multiple of PAGE_SIZE (it is divided
 * down to a page count for the map call).
 */
#define CONFIGURE_AND_MAP_MAILBOX(mb_name, buffers_size, smc_ret) \
	do { \
	CONFIGURE_MAILBOX(mb_name, buffers_size); \
	smc_ret = ffa_rxtx_map( \
			(uintptr_t)mb_name.send, \
			(uintptr_t)mb_name.recv, \
			buffers_size / PAGE_SIZE \
		); \
	} while (false)
| 92 | |
J-Alves | 43887ec | 2021-02-22 12:21:44 +0000 | [diff] [blame] | 93 | /** |
| 94 | * Helpers to evaluate returns of FF-A calls. |
| 95 | */ |
Daniel Boulby | ce386b1 | 2022-03-29 18:36:36 +0100 | [diff] [blame] | 96 | bool is_ffa_call_error(struct ffa_value val); |
| 97 | bool is_expected_ffa_error(struct ffa_value ret, int32_t error_code); |
| 98 | bool is_ffa_direct_response(struct ffa_value ret); |
| 99 | bool is_expected_ffa_return(struct ffa_value ret, uint32_t func_id); |
| 100 | bool is_expected_cactus_response(struct ffa_value ret, uint32_t expected_resp, |
J-Alves | 227065a | 2021-03-11 10:01:36 +0000 | [diff] [blame] | 101 | uint32_t arg); |
Daniel Boulby | ce386b1 | 2022-03-29 18:36:36 +0100 | [diff] [blame] | 102 | void dump_ffa_value(struct ffa_value ret); |
J-Alves | 43887ec | 2021-02-22 12:21:44 +0000 | [diff] [blame] | 103 | |
Kathleen Capella | fb96b98 | 2024-04-25 17:09:33 -0500 | [diff] [blame^] | 104 | uint64_t ffa_get_uuid_lo(const struct ffa_uuid uuid); |
| 105 | uint64_t ffa_get_uuid_hi(const struct ffa_uuid uuid); |
| 106 | |
/*
 * Checks which exception level the SPMC executes at.
 * NOTE(review): return semantics (which level maps to true) are defined in
 * the implementation — confirm against spm_common.c before relying on it.
 */
bool check_spmc_execution_level(void);

/* Returns the number of entries in the FFA_FEATURES test table and makes
 * '*test_target' point at it. */
unsigned int get_ffa_feature_test_target(
		const struct ffa_features_test **test_target);
/* Runs 'test_target_size' FFA_FEATURES test cases from 'targets';
 * true if all cases pass. */
bool ffa_features_test_targets(const struct ffa_features_test *targets,
			       uint32_t test_target_size);
Olivier Deprez | 881b199 | 2020-12-01 15:34:34 +0100 | [diff] [blame] | 113 | |
J-Alves | be1519a | 2021-02-19 14:33:54 +0000 | [diff] [blame] | 114 | /** |
| 115 | * Helper to conduct a memory retrieve. This is to be called by the receiver |
| 116 | * of a memory share operation. |
| 117 | */ |
| 118 | bool memory_retrieve(struct mailbox_buffers *mb, |
| 119 | struct ffa_memory_region **retrieved, uint64_t handle, |
Karl Meakin | 1331a8c | 2023-09-14 16:25:15 +0100 | [diff] [blame] | 120 | ffa_id_t sender, struct ffa_memory_access receivers[], |
Daniel Boulby | 3d8cd68 | 2024-07-23 14:28:15 +0100 | [diff] [blame] | 121 | uint32_t receiver_count, ffa_memory_region_flags_t flags, |
| 122 | bool is_normal_memory); |
J-Alves | be1519a | 2021-02-19 14:33:54 +0000 | [diff] [blame] | 123 | |
J-Alves | 8035493 | 2024-10-15 11:24:27 +0100 | [diff] [blame] | 124 | bool hypervisor_retrieve_request_continue( |
| 125 | struct mailbox_buffers *mb, uint64_t handle, void *out, uint32_t out_size, |
| 126 | uint32_t total_size, uint32_t fragment_offset, bool release_rx); |
| 127 | |
Karl Meakin | 3d879b8 | 2023-06-16 10:32:08 +0100 | [diff] [blame] | 128 | bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle, |
| 129 | void *out, uint32_t out_size); |
| 130 | |
J-Alves | be1519a | 2021-02-19 14:33:54 +0000 | [diff] [blame] | 131 | /** |
| 132 | * Helper to conduct a memory relinquish. The caller is usually the receiver, |
| 133 | * after it being done with the memory shared, identified by the 'handle'. |
| 134 | */ |
| 135 | bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle, |
Daniel Boulby | e79d207 | 2021-03-03 11:34:53 +0000 | [diff] [blame] | 136 | ffa_id_t id); |
J-Alves | be1519a | 2021-02-19 14:33:54 +0000 | [diff] [blame] | 137 | |
| 138 | ffa_memory_handle_t memory_send( |
Karl Meakin | 0d4f5ff | 2023-10-13 20:03:16 +0100 | [diff] [blame] | 139 | void *send_buffer, uint32_t mem_func, |
| 140 | const struct ffa_memory_region_constituent *constituents, |
| 141 | uint32_t constituent_count, uint32_t remaining_constituent_count, |
| 142 | uint32_t fragment_length, uint32_t total_length, |
| 143 | struct ffa_value *ret); |
J-Alves | be1519a | 2021-02-19 14:33:54 +0000 | [diff] [blame] | 144 | |
| 145 | ffa_memory_handle_t memory_init_and_send( |
Karl Meakin | 0d4f5ff | 2023-10-13 20:03:16 +0100 | [diff] [blame] | 146 | void *send_buffer, size_t memory_region_max_size, ffa_id_t sender, |
| 147 | struct ffa_memory_access receivers[], uint32_t receiver_count, |
Karl Meakin | 1331a8c | 2023-09-14 16:25:15 +0100 | [diff] [blame] | 148 | const struct ffa_memory_region_constituent *constituents, |
Daniel Boulby | ce386b1 | 2022-03-29 18:36:36 +0100 | [diff] [blame] | 149 | uint32_t constituents_count, uint32_t mem_func, struct ffa_value *ret); |
J-Alves | be1519a | 2021-02-19 14:33:54 +0000 | [diff] [blame] | 150 | |
/*
 * Queries FFA_PARTITION_INFO_GET for 'uuid' through the mailbox and checks
 * the result against 'expected' ('expected_size' entries).
 */
bool ffa_partition_info_helper(struct mailbox_buffers *mb,
			       const struct ffa_uuid uuid,
			       const struct ffa_partition_info *expected,
			       const uint16_t expected_size);
/* Enable/disable the trusted watchdog virtual interrupt via a direct
 * message from 'source' to 'dest'. */
bool enable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);
bool disable_trusted_wdog_interrupt(ffa_id_t source, ffa_id_t dest);

/* Extracts the 'idx'-th partition info entry from the register-based
 * FFA_PARTITION_INFO_GET_REGS return in 'args'. */
bool ffa_partition_info_regs_get_part_info(
	struct ffa_value *args, uint8_t idx,
	struct ffa_partition_info *partition_info);
/* Register-based variant of ffa_partition_info_helper (no mailbox). */
bool ffa_partition_info_regs_helper(const struct ffa_uuid uuid,
				    const struct ffa_partition_info *expected,
				    const uint16_t expected_size);

/* Builds a memory access descriptor for 'receiver_id' with permissions
 * appropriate for the sharing ABI 'mem_func'. */
struct ffa_memory_access ffa_memory_access_init_permissions_from_mem_func(
	ffa_id_t receiver_id,
	uint32_t mem_func);

/*
 * Receives an indirect message addressed to 'receiver' from the RX buffer
 * 'recv', copying up to 'buffer_size' bytes of payload into 'buffer' and
 * reporting the sender in '*sender'.
 */
bool receive_indirect_message(void *buffer, size_t buffer_size, void *recv,
			      ffa_id_t *sender, ffa_id_t receiver,
			      ffa_id_t own_id);
/* Sends 'payload' ('payload_size' bytes) as an indirect message from
 * 'from' to 'to' using TX buffer 'send'; returns the raw FF-A result. */
struct ffa_value send_indirect_message(
	ffa_id_t from, ffa_id_t to, void *send, const void *payload,
	size_t payload_size, uint32_t send_flags);
Max Shvetsov | 103e056 | 2021-02-04 16:58:31 +0000 | [diff] [blame] | 175 | #endif /* SPM_COMMON_H */ |