/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"
#include "hf/dlog.h"
#include "hf/spci_internal.h"
#include "hf/std.h"

/**
 * Check that the message length matches the number of memory region
 * constituents and, if it does, call the memory sharing routine.
 */
static struct spci_value spci_validate_call_share_memory(
	struct vm_locked to_locked, struct vm_locked from_locked,
	struct spci_memory_region *memory_region, uint32_t memory_share_size,
	uint32_t memory_to_attributes, enum spci_memory_share share)
{
	uint32_t max_count = memory_region->count;

	/*
	 * Ensure the stated number of constituents matches the size of the
	 * memory sharing message.
	 */
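	/*
	 * For example, a region described by two constituents must arrive in
	 * a message of exactly sizeof(struct spci_memory_region) +
	 * 2 * sizeof(struct spci_memory_region_constituent) bytes; any other
	 * size is rejected below.
	 */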
	if (memory_share_size !=
	    sizeof(struct spci_memory_region) +
		    (sizeof(struct spci_memory_region_constituent) *
		     max_count)) {
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	return api_spci_share_memory(to_locked, from_locked, memory_region,
				     memory_to_attributes, share);
}

/**
 * Performs initial parsing of the architected message and calls the
 * corresponding API function implementing the functionality requested by
 * the message.
 */
struct spci_value spci_msg_handle_architected_message(
	struct vm_locked to_locked, struct vm_locked from_locked,
	const struct spci_architected_message_header
		*architected_message_replica,
	uint32_t size)
{
	struct spci_value ret;
	struct spci_memory_region *memory_region;
	uint32_t to_mode;
	uint32_t message_type;
	uint32_t memory_share_size;

	message_type = architected_message_replica->type;

	switch (message_type) {
	case SPCI_MEMORY_DONATE:
		memory_region = (struct spci_memory_region *)
				architected_message_replica->payload;

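		/*
		 * The memory region description occupies the remainder of
		 * the message after the architected message header.
		 */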
		memory_share_size =
			size - sizeof(struct spci_architected_message_header);

		/* TODO: Add memory attributes. */
		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;

		ret = spci_validate_call_share_memory(
			to_locked, from_locked, memory_region,
			memory_share_size, to_mode, message_type);
		break;

	case SPCI_MEMORY_RELINQUISH:

		memory_region = (struct spci_memory_region *)
				architected_message_replica->payload;

		memory_share_size =
			size - sizeof(struct spci_architected_message_header);

		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;

		ret = spci_validate_call_share_memory(
			to_locked, from_locked, memory_region,
			memory_share_size, to_mode, message_type);

		break;

	case SPCI_MEMORY_LEND: {
		/* TODO: Add support for lend exclusive. */
		struct spci_memory_lend *lend_descriptor;
		uint32_t borrower_attributes;

		lend_descriptor = (struct spci_memory_lend *)
				  architected_message_replica->payload;

		borrower_attributes = lend_descriptor->borrower_attributes;

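		/*
		 * The memory region description follows the lend descriptor
		 * in the message payload.
		 */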
		memory_region =
			(struct spci_memory_region *)lend_descriptor->payload;
		memory_share_size =
			size - sizeof(struct spci_architected_message_header) -
			sizeof(struct spci_memory_lend);

		to_mode = spci_memory_attrs_to_mode(borrower_attributes);

		ret = spci_validate_call_share_memory(
			to_locked, from_locked, memory_region,
			memory_share_size, to_mode, message_type);

		break;
	}

	default:
		dlog("Invalid memory sharing message.\n");
		return spci_error(SPCI_INVALID_PARAMETERS);
	}

	/* Copy data to the destination Rx. */
	/*
	 * TODO: Translate the <from> IPA addresses to <to> IPA addresses.
	 * Currently we assume identity mapping of the stage 2 translation.
	 * Removing this assumption relies on a mechanism to handle scenarios
	 * where the memory region fits in the source Tx buffer but cannot fit
	 * in the destination Rx buffer. This mechanism will be defined at the
	 * spec level.
	 */
	if (ret.func == SPCI_SUCCESS_32) {
		memcpy_s(to_locked.vm->mailbox.recv, SPCI_MSG_PAYLOAD_MAX,
			 architected_message_replica, size);
		to_locked.vm->mailbox.recv_size = size;
		to_locked.vm->mailbox.recv_sender = from_locked.vm->id;
		to_locked.vm->mailbox.recv_attributes =
			SPCI_MSG_SEND_LEGACY_MEMORY;
	}

	return ret;
}

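/*
 * A hypothetical caller-side sketch (names are illustrative only): the send
 * path is expected to copy the architected message out of the sender's TX
 * buffer into a local replica and forward the result to the sender, e.g.:
 *
 *	struct spci_value ret = spci_msg_handle_architected_message(
 *		to_locked, from_locked, replica, size);
 *	if (ret.func != SPCI_SUCCESS_32) {
 *		return ret;
 *	}
 */
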
/**
 * Obtain the next mode to apply to the two VMs.
 *
 * Returns:
 *  False indicates that a state transition was not found.
 *  Success is indicated by true.
 */
static bool spci_msg_get_next_state(
	const struct spci_mem_transitions *transitions,
	uint32_t transition_count, uint32_t memory_to_attributes,
	int orig_from_mode, int orig_to_mode, int *from_mode, int *to_mode)
{
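	/*
	 * Only the ownership, validity and shareability bits take part in
	 * the state comparison below; the sender's remaining mode bits are
	 * preserved in *from_mode and the receiver's access bits come from
	 * memory_to_attributes.
	 */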
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t orig_from_state = orig_from_mode & state_mask;

	for (uint32_t index = 0; index < transition_count; index++) {
		uint32_t table_orig_from_mode =
			transitions[index].orig_from_mode;
		uint32_t table_orig_to_mode = transitions[index].orig_to_mode;

		if ((orig_from_state == table_orig_from_mode) &&
		    ((orig_to_mode & state_mask) == table_orig_to_mode)) {
			*to_mode = transitions[index].to_mode |
				   memory_to_attributes;

			*from_mode = transitions[index].from_mode |
				     (~state_mask & orig_from_mode);

			return true;
		}
	}
	return false;
}

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the two VMs.
 *
 * Returns:
 *  False indicates that:
 *   1) a state transition was not found;
 *   2) the pages being shared do not all have the same mode within the <to>
 *      or <from> VMs;
 *   3) the beginning and end IPAs are not page aligned;
 *   4) the requested share type was not handled.
 *  Success is indicated by true.
 */
bool spci_msg_check_transition(struct vm *to, struct vm *from,
			       enum spci_memory_share share,
			       int *orig_from_mode, ipaddr_t begin,
			       ipaddr_t end, uint32_t memory_to_attributes,
			       int *from_mode, int *to_mode)
{
	int orig_to_mode;
	const struct spci_mem_transitions *mem_transition_table;
	uint32_t transition_table_size;

	/*
	 * TODO: Transition table does not currently consider the multiple
	 * shared case.
	 */
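	/*
	 * Notation for the transition tables below: O denotes the owner and
	 * !O a non-owner; EA, SA and NA denote exclusive, shared and no
	 * access respectively. Each entry maps the original {from, to}
	 * modes to the resulting {from, to} modes.
	 */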
	static const struct spci_mem_transitions donate_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 2) {O-NA, !O-EA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_INVALID,
			.orig_to_mode = MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 3) {O-SA, !O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 4) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const uint32_t size_donate_transitions =
		ARRAY_SIZE(donate_transitions);

	static const struct spci_mem_transitions relinquish_transitions[] = {
		{
			/* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED,
			.orig_to_mode = MM_MODE_INVALID,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
		{
			/* 2) {!O-SA, O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const uint32_t size_relinquish_transitions =
		ARRAY_SIZE(relinquish_transitions);

	/*
	 * This data structure holds the allowed state transitions for the
	 * "lend with shared access" state machine. In this state machine the
	 * owner keeps the lent pages mapped in its stage 2 table and retains
	 * access to them.
	 */
	static const struct spci_mem_transitions shared_lend_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {O-SA, !O-SA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_SHARED,
			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 2) {O-EA, !O-NA} -> {O-SA, !O-SA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_SHARED,
			.to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
		},
	};

	static const uint32_t size_shared_lend_transitions =
		ARRAY_SIZE(shared_lend_transitions);

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
		return false;
	}

	/* Ensure that the memory range is mapped with the same mode. */
	if (!mm_vm_get_mode(&from->ptable, begin, end, orig_from_mode) ||
	    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode)) {
		return false;
	}

	/* Ensure the address range is normal memory and not a device. */
	if (*orig_from_mode & MM_MODE_D) {
		return false;
	}

	switch (share) {
	case SPCI_MEMORY_DONATE:
		mem_transition_table = donate_transitions;
		transition_table_size = size_donate_transitions;
		break;

	case SPCI_MEMORY_RELINQUISH:
		mem_transition_table = relinquish_transitions;
		transition_table_size = size_relinquish_transitions;
		break;

	case SPCI_MEMORY_LEND:
		mem_transition_table = shared_lend_transitions;
		transition_table_size = size_shared_lend_transitions;
		break;

	default:
		return false;
	}

	return spci_msg_get_next_state(mem_transition_table,
				       transition_table_size,
				       memory_to_attributes, *orig_from_mode,
				       orig_to_mode, from_mode, to_mode);
}