/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"
#include "hf/dlog.h"
#include "hf/spci_internal.h"
#include "hf/std.h"

/**
 * Check that the message length is consistent with the number of memory
 * region constituents; if it is, call the memory sharing routine.
 */
static spci_return_t spci_validate_call_share_memory(
	struct vm_locked to_locked, struct vm_locked from_locked,
	struct spci_memory_region *memory_region, uint32_t memory_share_size,
	uint32_t memory_to_attributes, enum spci_memory_share share)
{
	uint32_t max_count = memory_region->count;

	/*
	 * Ensure the number of constituents is consistent with the message
	 * size: the payload must hold the region header immediately followed
	 * by exactly max_count constituents.
	 */
	if (memory_share_size !=
	    sizeof(struct spci_memory_region) +
		    (sizeof(struct spci_memory_region_constituent) *
		     max_count)) {
		return SPCI_INVALID_PARAMETERS;
	}

	return api_spci_share_memory(to_locked, from_locked, memory_region,
				     memory_to_attributes, share);
}
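
/*
 * Illustrative sketch (comment only, not compiled): for a region with two
 * constituents, the only memory_share_size accepted above is
 *
 *	sizeof(struct spci_memory_region) +
 *	2 * sizeof(struct spci_memory_region_constituent)
 *
 * i.e. the region header immediately followed by its constituent array;
 * any other length is rejected as SPCI_INVALID_PARAMETERS.
 */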

/**
 * Performs initial parsing of the architected message and calls the API
 * function implementing the functionality it requests.
 */
spci_return_t spci_msg_handle_architected_message(
	struct vm_locked to_locked, struct vm_locked from_locked,
	const struct spci_architected_message_header
		*architected_message_replica,
	struct spci_message *from_msg_replica, struct spci_message *to_msg)
{
	int64_t ret;
	struct spci_memory_region *memory_region;
	uint32_t to_mode;
	uint32_t message_type;
	uint32_t memory_share_size;

	message_type = architected_message_replica->type;

	switch (message_type) {
	case SPCI_MEMORY_DONATE:
		memory_region = (struct spci_memory_region *)
					architected_message_replica->payload;

		memory_share_size =
			from_msg_replica->length -
			sizeof(struct spci_architected_message_header);

		/* TODO: Add memory attributes. */
		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;

		ret = spci_validate_call_share_memory(
			to_locked, from_locked, memory_region,
			memory_share_size, to_mode, message_type);
		break;

	case SPCI_MEMORY_RELINQUISH:
		memory_region = (struct spci_memory_region *)
					architected_message_replica->payload;

		memory_share_size =
			from_msg_replica->length -
			sizeof(struct spci_architected_message_header);

		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;

		ret = spci_validate_call_share_memory(
			to_locked, from_locked, memory_region,
			memory_share_size, to_mode, message_type);
		break;

	default:
		dlog("Invalid memory sharing message.\n");
		return SPCI_INVALID_PARAMETERS;
	}

	/* Copy data to the destination Rx. */
	/*
	 * TODO: Translate the <from> IPA addresses to <to> IPA addresses.
	 * Currently we assume identity mapping of the stage 2 translation.
	 * Removing this assumption relies on a mechanism to handle scenarios
	 * where the memory region fits in the source Tx buffer but cannot fit
	 * in the destination Rx buffer. This mechanism will be defined at the
	 * spec level.
	 */
	if (ret == SPCI_SUCCESS) {
		memcpy_s(to_msg->payload, SPCI_MSG_PAYLOAD_MAX,
			 architected_message_replica,
			 from_msg_replica->length);
	}
	*to_msg = *from_msg_replica;

	return ret;
}
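
/*
 * Sender-side sketch (comment only, not compiled) of the Tx layout this
 * handler expects; `tx` is a hypothetical pointer to the sender's mailbox
 * payload:
 *
 *	struct spci_architected_message_header *header =
 *		(struct spci_architected_message_header *)tx;
 *	struct spci_memory_region *region =
 *		(struct spci_memory_region *)header->payload;
 *
 *	header->type = SPCI_MEMORY_DONATE;
 *	region->count = 1;
 *	// ...fill in one constituent after the region header...
 *	// The message length must then be set to:
 *	//	sizeof(*header) + sizeof(*region) +
 *	//	sizeof(struct spci_memory_region_constituent)
 */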

/**
 * Obtain the next mode to apply to the two VMs.
 *
 * Returns true if a valid state transition was found and the new modes were
 * written to from_mode and to_mode; returns false if no transition exists.
 */
static bool spci_msg_get_next_state(
	const struct spci_mem_transitions *transitions,
	uint32_t transition_count, uint32_t memory_to_attributes,
	int orig_from_mode, int orig_to_mode, int *from_mode, int *to_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t orig_from_state = orig_from_mode & state_mask;

	for (uint32_t index = 0; index < transition_count; index++) {
		uint32_t table_orig_from_mode =
			transitions[index].orig_from_mode;
		uint32_t table_orig_to_mode = transitions[index].orig_to_mode;

		if (orig_from_state == table_orig_from_mode &&
		    (orig_to_mode & state_mask) == table_orig_to_mode) {
			*to_mode = transitions[index].to_mode |
				   memory_to_attributes;
			/*
			 * TODO: Change access permission assignment to cater
			 * for the lend case.
			 */
			*from_mode = transitions[index].from_mode;

			return true;
		}
	}

	return false;
}
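
/*
 * Worked example (comment only): suppose the donor currently maps the range
 * with MM_MODE_R | MM_MODE_W | MM_MODE_X and the receiver has no access
 * (MM_MODE_INVALID | MM_MODE_UNOWNED). Masking both with state_mask leaves 0
 * for the donor and INVALID | UNOWNED for the receiver, which matches donate
 * transition 1) {O-EA, !O-NA} below; the donor's mode therefore becomes
 * INVALID | UNOWNED and the receiver's becomes 0 plus the requested access
 * attributes.
 */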

/**
 * Verify that all pages have the same mode, that the starting mode
 * constitutes a valid state and obtain the next mode to apply
 * to the two VMs.
 *
 * Returns false if:
 *  1) a state transition was not found;
 *  2) the pages being shared do not all have the same mode within the
 *     <to> or <from> VMs;
 *  3) the beginning and end IPAs are not page aligned; or
 *  4) the requested share type was not handled.
 * Returns true on success.
 */
bool spci_msg_check_transition(struct vm *to, struct vm *from,
			       enum spci_memory_share share,
			       int *orig_from_mode, ipaddr_t begin,
			       ipaddr_t end, uint32_t memory_to_attributes,
			       int *from_mode, int *to_mode)
{
	int orig_to_mode;
	const struct spci_mem_transitions *mem_transition_table;
	uint32_t transition_table_size;

	/*
	 * TODO: Transition table does not currently consider the multiple
	 * shared case.
	 */
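	/*
	 * Notation used in the transition comments below (our reading of the
	 * shorthand): O = owner, !O = not owner, EA = exclusive access,
	 * NA = no access, SA = shared access. Each entry maps the (from, to)
	 * starting states to the (from, to) states after the transition.
	 */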
	static const struct spci_mem_transitions donate_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 2) {O-NA, !O-EA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_INVALID,
			.orig_to_mode = MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 3) {O-SA, !O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 4) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const struct spci_mem_transitions relinquish_transitions[] = {
		{
			/* 1) {!O-EA, O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED,
			.orig_to_mode = MM_MODE_INVALID,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
		{
			/* 2) {!O-SA, O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};

	static const uint32_t size_relinquish_transitions =
		ARRAY_SIZE(relinquish_transitions);

	static const uint32_t size_donate_transitions =
		ARRAY_SIZE(donate_transitions);

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
		return false;
	}

	/*
	 * Ensure that the memory range is mapped with a uniform mode within
	 * each VM, and retrieve those modes.
	 */
	if (!mm_vm_get_mode(&from->ptable, begin, end, orig_from_mode) ||
	    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode)) {
		return false;
	}

	switch (share) {
	case SPCI_MEMORY_DONATE:
		mem_transition_table = donate_transitions;
		transition_table_size = size_donate_transitions;
		break;

	case SPCI_MEMORY_RELINQUISH:
		mem_transition_table = relinquish_transitions;
		transition_table_size = size_relinquish_transitions;
		break;

	default:
		return false;
	}

	return spci_msg_get_next_state(mem_transition_table,
				       transition_table_size,
				       memory_to_attributes, *orig_from_mode,
				       orig_to_mode, from_mode, to_mode);
}
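
/*
 * Hypothetical caller sketch (comment only, not compiled): an API-level
 * sharing routine could drive this check roughly as follows, where `to`,
 * `from`, `begin`, `end` and `memory_to_attributes` come from the parsed
 * architected message:
 *
 *	int orig_from_mode;
 *	int from_mode;
 *	int to_mode;
 *
 *	if (!spci_msg_check_transition(to, from, SPCI_MEMORY_DONATE,
 *				       &orig_from_mode, begin, end,
 *				       memory_to_attributes, &from_mode,
 *				       &to_mode)) {
 *		return SPCI_INVALID_PARAMETERS;
 *	}
 *	// ...then remap the range in both VMs using from_mode/to_mode.
 */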