/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hf/api.h"
#include "hf/dlog.h"
#include "hf/spci_internal.h"
#include "hf/std.h"

/**
 * Checks that the message length is consistent with the number of memory
 * region constituents and, if so, calls the memory sharing routine.
 */
static spci_return_t spci_validate_call_share_memory(
	struct vm_locked to_locked, struct vm_locked from_locked,
	struct spci_memory_region *memory_region, uint32_t memory_share_size,
	uint32_t memory_to_attributes, enum spci_memory_share share)
{
	uint32_t max_count = memory_region->count;

	/*
	 * Ensure the number of constituents is consistent with the size of
	 * the memory region descriptor.
	 */
	if (memory_share_size !=
	    sizeof(struct spci_memory_region) +
		    (sizeof(struct spci_memory_region_constituent) *
		     max_count)) {
		return SPCI_INVALID_PARAMETERS;
	}

	return api_spci_share_memory(to_locked, from_locked, memory_region,
				     memory_to_attributes, share);
}
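
/*
 * For illustration of the size check above, using hypothetical sizes (the
 * real values come from the spci headers): if struct spci_memory_region
 * were 16 bytes and each constituent 16 bytes, a region with count == 2
 * would only be accepted when memory_share_size == 16 + 2 * 16 == 48.
 * Any other length, longer or shorter, fails with SPCI_INVALID_PARAMETERS
 * before the sharing routine is reached.
 */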

/**
 * Performs initial parsing of the architected message and calls the API
 * function that implements the functionality requested by the message.
 */
spci_return_t spci_msg_handle_architected_message(
	struct vm_locked to_locked, struct vm_locked from_locked,
	const struct spci_architected_message_header
		*architected_message_replica,
	struct spci_message *from_msg_replica, struct spci_message *to_msg)
{
	spci_return_t ret;
	struct spci_memory_region *memory_region;
	uint32_t to_mode;
	uint32_t message_type;
	uint32_t memory_share_size;

	message_type = architected_message_replica->type;

	switch (message_type) {
	case SPCI_MEMORY_DONATE:
		memory_region = (struct spci_memory_region *)
					architected_message_replica->payload;

		/* Reject messages too short to hold the architected header. */
		if (from_msg_replica->length <
		    sizeof(struct spci_architected_message_header)) {
			return SPCI_INVALID_PARAMETERS;
		}

		memory_share_size =
			from_msg_replica->length -
			sizeof(struct spci_architected_message_header);

		/* TODO: Add memory attributes. */
		to_mode = MM_MODE_R | MM_MODE_W | MM_MODE_X;

		ret = spci_validate_call_share_memory(
			to_locked, from_locked, memory_region,
			memory_share_size, to_mode, message_type);
		break;

	default:
		dlog("Invalid memory sharing message.\n");
		return SPCI_INVALID_PARAMETERS;
	}

	/* Copy data to the destination Rx. */
	/*
	 * TODO: Translate the <from> IPA addresses to <to> IPA addresses.
	 * Currently we assume identity mapping of the stage 2 translation.
	 * Removing this assumption relies on a mechanism to handle scenarios
	 * where the memory region fits in the source Tx buffer but cannot fit
	 * in the destination Rx buffer. This mechanism will be defined at the
	 * spec level.
	 */
	if (ret == SPCI_SUCCESS) {
		memcpy_s(to_msg->payload, SPCI_MSG_PAYLOAD_MAX,
			 architected_message_replica,
			 from_msg_replica->length);
	}
	*to_msg = *from_msg_replica;

	return ret;
}
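
/*
 * A sketch of the Tx buffer layout the donate path above expects from the
 * sender (field names are from this file; the byte layout is an assumption
 * based on the parsing code, not a statement of the SPCI spec):
 *
 *	struct spci_message
 *		.length = sizeof(struct spci_architected_message_header)
 *			  + memory_share_size
 *		.payload:
 *			struct spci_architected_message_header
 *				.type = SPCI_MEMORY_DONATE
 *				.payload:
 *					struct spci_memory_region
 *					struct spci_memory_region_constituent[count]
 */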

/**
 * Obtains the next mode to apply to the two VMs.
 *
 * Returns:
 *  False if a matching state transition was not found.
 *  True on success, in which case *from_mode and *to_mode hold the new
 *  modes.
 */
static bool spci_msg_get_next_state(
	const struct spci_mem_transitions *transitions,
	uint32_t transition_count, uint32_t memory_to_attributes,
	int orig_from_mode, int orig_to_mode, int *from_mode, int *to_mode)
{
	const uint32_t state_mask =
		MM_MODE_INVALID | MM_MODE_UNOWNED | MM_MODE_SHARED;
	const uint32_t orig_from_state = orig_from_mode & state_mask;

	for (uint32_t index = 0; index < transition_count; index++) {
		uint32_t table_orig_from_mode =
			transitions[index].orig_from_mode;
		uint32_t table_orig_to_mode = transitions[index].orig_to_mode;

		if ((orig_from_state == table_orig_from_mode) &&
		    ((orig_to_mode & state_mask) == table_orig_to_mode)) {
			*to_mode = transitions[index].to_mode |
				   memory_to_attributes;
			/*
			 * TODO: Change access permission assignment to cater
			 * for the lend case.
			 */
			*from_mode = transitions[index].from_mode;

			return true;
		}
	}
	return false;
}
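
/*
 * Worked example of the matching above, assuming the sender maps the pages
 * with access bits only (e.g. MM_MODE_R | MM_MODE_W | MM_MODE_X) and no
 * state bits: orig_from_mode & state_mask == 0, the "owner with exclusive
 * access" encoding. If the receiver's pages carry
 * MM_MODE_INVALID | MM_MODE_UNOWNED, the pair matches donate transition 1)
 * in the table below, so the sender ends up unowned with no access and the
 * receiver becomes the exclusive owner.
 */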

/**
 * Verifies that all pages have the same mode, that the starting mode
 * constitutes a valid state, and obtains the next mode to apply to the
 * two VMs.
 *
 * Returns:
 *  False, indicating an error, if:
 *  1) a state transition was not found;
 *  2) the pages being shared do not have the same mode within the <to>
 *     or <from> VMs;
 *  3) the beginning and end IPAs are not page aligned;
 *  4) the requested share type was not handled.
 *  True on success.
 */
bool spci_msg_check_transition(struct vm *to, struct vm *from,
			       enum spci_memory_share share,
			       int *orig_from_mode, ipaddr_t begin,
			       ipaddr_t end, uint32_t memory_to_attributes,
			       int *from_mode, int *to_mode)
{
	int orig_to_mode;
	const struct spci_mem_transitions *mem_transition_table;
	uint32_t transition_table_size;

	/*
	 * TODO: Transition table does not currently consider the multiple
	 * shared case.
	 */
	static const struct spci_mem_transitions donate_transitions[] = {
		{
			/* 1) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 2) {O-NA, !O-EA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_INVALID,
			.orig_to_mode = MM_MODE_UNOWNED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/* 3) {O-SA, !O-SA} -> {!O-NA, O-EA} */
			.orig_from_mode = MM_MODE_SHARED,
			.orig_to_mode = MM_MODE_UNOWNED | MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED,
			.to_mode = 0,
		},
		{
			/*
			 * Duplicate of 1) in order to cater for an alternative
			 * representation of !O-NA:
			 * (INVALID | UNOWNED | SHARED) and (INVALID | UNOWNED)
			 * are both alternate representations of !O-NA.
			 */
			/* 4) {O-EA, !O-NA} -> {!O-NA, O-EA} */
			.orig_from_mode = 0,
			.orig_to_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
					MM_MODE_SHARED,
			.from_mode = MM_MODE_INVALID | MM_MODE_UNOWNED |
				     MM_MODE_SHARED,
			.to_mode = 0,
		},
	};
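	/*
	 * Notation used in the entries above (an editorial gloss inferred
	 * from the mode bits, not spec text): O = owner, !O = not owner;
	 * EA/SA/NA = exclusive/shared/no access. Each entry reads
	 * {sender, receiver} -> {new sender, new receiver}, so every donate
	 * transition ends with the receiver as the exclusive owner.
	 */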
	static const uint32_t size_donate_transitions =
		ARRAY_SIZE(donate_transitions);

	/* Fail if addresses are not page-aligned. */
	if (!is_aligned(ipa_addr(begin), PAGE_SIZE) ||
	    !is_aligned(ipa_addr(end), PAGE_SIZE)) {
		return false;
	}

	/*
	 * Ensure that the memory range is mapped with a uniform mode within
	 * each of the two VMs.
	 */
	if (!mm_vm_get_mode(&from->ptable, begin, end, orig_from_mode) ||
	    !mm_vm_get_mode(&to->ptable, begin, end, &orig_to_mode)) {
		return false;
	}

	switch (share) {
	case SPCI_MEMORY_DONATE:
		mem_transition_table = donate_transitions;
		transition_table_size = size_donate_transitions;
		break;

	default:
		return false;
	}

	return spci_msg_get_next_state(mem_transition_table,
				       transition_table_size,
				       memory_to_attributes, *orig_from_mode,
				       orig_to_mode, from_mode, to_mode);
}
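
/*
 * A sketch of how a caller might drive the transition check above; the
 * surrounding locals and the follow-up mapping step are hypothetical and
 * only illustrate the intended calling sequence:
 *
 *	int orig_from_mode;
 *	int from_mode;
 *	int to_mode;
 *
 *	if (!spci_msg_check_transition(to, from, SPCI_MEMORY_DONATE,
 *				       &orig_from_mode, begin, end,
 *				       memory_to_attributes, &from_mode,
 *				       &to_mode)) {
 *		return SPCI_INVALID_PARAMETERS;
 *	}
 *	... apply from_mode to <from>'s ptable and to_mode to <to>'s ...
 */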