/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/ffa.h"

#include "hf/mm.h"
#include "hf/static_assert.h"

#include "vmapi/hf/call.h"

#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;

struct mailbox_buffers set_up_mailbox(void)
{
        ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
                  FFA_SUCCESS_32);
        return (struct mailbox_buffers){
                .send = send_page,
                .recv = recv_page,
        };
}
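
/*
 * Illustrative sketch, not part of the original file: a test would typically
 * call set_up_mailbox() once and reuse the returned buffers for every FF-A
 * call it makes, along these lines:
 *
 *      struct mailbox_buffers mb = set_up_mailbox();
 *      ... write messages or memory region descriptors into mb.send ...
 *      ... read responses and retrieve requests from mb.recv ...
 *
 * The pages are statically allocated above, so the RX/TX mapping is set up at
 * most once per test VM.
 */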

/*
 * Helper function to send memory to a VM, then send a message with the
 * retrieve request it needs to retrieve it.
 */
ffa_memory_handle_t send_memory_and_retrieve_request(
        uint32_t share_func, void *tx_buffer, ffa_vm_id_t sender,
        ffa_vm_id_t recipient,
        struct ffa_memory_region_constituent constituents[],
        uint32_t constituent_count, ffa_memory_region_flags_t flags,
        enum ffa_data_access send_data_access,
        enum ffa_data_access retrieve_data_access,
        enum ffa_instruction_access send_instruction_access,
        enum ffa_instruction_access retrieve_instruction_access)
{
        uint32_t msg_size;
        struct ffa_value ret;
        ffa_memory_handle_t handle;

        /* Send the memory. */
        msg_size = ffa_memory_region_init(
                tx_buffer, sender, recipient, constituents, constituent_count,
                0, flags, send_data_access, send_instruction_access,
                FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
                FFA_MEMORY_OUTER_SHAREABLE);
        switch (share_func) {
        case FFA_MEM_DONATE_32:
                ret = ffa_mem_donate(msg_size, msg_size);
                break;
        case FFA_MEM_LEND_32:
                ret = ffa_mem_lend(msg_size, msg_size);
                break;
        case FFA_MEM_SHARE_32:
                ret = ffa_mem_share(msg_size, msg_size);
                break;
        default:
                FAIL("Invalid share_func %#x.\n", share_func);
                /* Never reached, but needed to keep clang-analyser happy. */
                return 0;
        }
        EXPECT_EQ(ret.func, FFA_SUCCESS_32);
        handle = ffa_mem_success_handle(ret);
        EXPECT_EQ(handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK,
                  FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR);

        /*
         * Send the appropriate retrieve request to the VM so that it can use
         * it to retrieve the memory.
         */
        msg_size = ffa_memory_retrieve_request_init(
                tx_buffer, handle, sender, recipient, 0, 0,
                retrieve_data_access, retrieve_instruction_access,
                FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
                FFA_MEMORY_OUTER_SHAREABLE);
        EXPECT_EQ(ffa_msg_send(sender, recipient, msg_size, 0).func,
                  FFA_SUCCESS_32);

        return handle;
}
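
/*
 * Illustrative sketch, not part of the original file: a primary-VM test might
 * lend a page to a service VM with the helper above roughly as follows. The
 * names SERVICE_VM1, HF_PRIMARY_VM_ID, ARRAY_SIZE and the hftest TEST() macro
 * are assumed to be provided by the wider test suite.
 *
 *      TEST(memory_sharing, lend_example)
 *      {
 *              static alignas(PAGE_SIZE) uint8_t page[PAGE_SIZE];
 *              struct mailbox_buffers mb = set_up_mailbox();
 *              struct ffa_memory_region_constituent constituents[] = {
 *                      {.address = (uint64_t)page, .page_count = 1},
 *              };
 *
 *              send_memory_and_retrieve_request(
 *                      FFA_MEM_LEND_32, mb.send, HF_PRIMARY_VM_ID,
 *                      SERVICE_VM1, constituents, ARRAY_SIZE(constituents),
 *                      0, FFA_DATA_ACCESS_RW, FFA_DATA_ACCESS_RW,
 *                      FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
 *                      FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED);
 *      }
 */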

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us. Returns the sender; the handle is returned via
 * the handle parameter, which may be NULL if the caller does not need it.
 */
ffa_vm_id_t retrieve_memory_from_message(void *recv_buf, void *send_buf,
                                         struct ffa_value msg_ret,
                                         ffa_memory_handle_t *handle)
{
        uint32_t msg_size;
        struct ffa_value ret;
        struct ffa_memory_region *memory_region;
        ffa_vm_id_t sender;

        EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
        msg_size = ffa_msg_send_size(msg_ret);
        sender = ffa_msg_send_sender(msg_ret);

        if (handle != NULL) {
                struct ffa_memory_region *retrieve_request =
                        (struct ffa_memory_region *)recv_buf;
                *handle = retrieve_request->handle;
        }
        memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
        ffa_rx_release();
        ret = ffa_mem_retrieve_req(msg_size, msg_size);
        EXPECT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
        memory_region = (struct ffa_memory_region *)recv_buf;
        EXPECT_EQ(memory_region->receiver_count, 1);
        EXPECT_EQ(memory_region->receivers[0].receiver_permissions.receiver,
                  hf_vm_get_id());

        return sender;
}
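
/*
 * Illustrative sketch, not part of the original file: on the receiving side, a
 * service VM would typically wait for the message carrying the retrieve
 * request and then call the helper above, roughly:
 *
 *      struct ffa_value ret = ffa_msg_wait();
 *      ffa_memory_handle_t handle;
 *      ffa_vm_id_t sender =
 *              retrieve_memory_from_message(recv_buf, send_buf, ret, &handle);
 *
 * ffa_msg_wait() and the buffer pointers are assumed to come from the
 * service's own set-up code; after a successful retrieve the memory described
 * by the region is accessible and the handle can later be used to relinquish
 * it.
 */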

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us, expecting it to fail with the given error code.
 * Returns the sender.
 */
ffa_vm_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
                                                     void *send_buf,
                                                     struct ffa_value msg_ret,
                                                     int32_t expected_error)
{
        uint32_t msg_size;
        struct ffa_value ret;
        ffa_vm_id_t sender;

        EXPECT_EQ(msg_ret.func, FFA_MSG_SEND_32);
        msg_size = ffa_msg_send_size(msg_ret);
        sender = ffa_msg_send_sender(msg_ret);

        memcpy_s(send_buf, HF_MAILBOX_SIZE, recv_buf, msg_size);
        ffa_rx_release();
        ret = ffa_mem_retrieve_req(msg_size, msg_size);
        EXPECT_FFA_ERROR(ret, expected_error);

        return sender;
}