blob: c91ed5f0910fc049e4af2713d6dda3e6fd2c5889 [file] [log] [blame]
J-Alves7581c382020-05-07 18:34:20 +01001/*
2 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#ifndef FFA_HELPERS_H
8#define FFA_HELPERS_H
9
J-Alves8f08a052020-05-26 17:14:40 +010010#include <ffa_svc.h>
J-Alves7581c382020-05-07 18:34:20 +010011#include <tftf_lib.h>
12#include <utils_def.h>
13
/*
 * This error code must be different to the ones used by FFA.
 * Parenthesised so the negative value expands safely inside any expression.
 */
#define FFA_TFTF_ERROR		(-42)

/* Hypervisor ID at physical FFA instance */
#define HYP_ID			(0)

/* By convention, SP IDs (as opposed to VM IDs) have bit 15 set */
#define SP_ID(x)		((x) | (1 << 15))
22
/*
 * 16-bit identifiers/counts used by the FF-A interfaces.
 * NOTE(review): the fixed-width types below are used before the
 * #include <stdint.h> further down and outside the __ASSEMBLY__ guard;
 * presumably they arrive transitively via tftf_lib.h — verify.
 */
typedef unsigned short ffa_vm_id_t;
typedef unsigned short ffa_vm_count_t;
typedef unsigned short ffa_vcpu_count_t;
/* Interrupt ID passed to interrupt-related FF-A interfaces. */
typedef uint32_t ffa_int_id_t;
/* Globally unique handle identifying a memory sharing transaction. */
typedef uint64_t ffa_memory_handle_t;
/** Flags to indicate properties of receivers during memory region retrieval. */
typedef uint8_t ffa_memory_receiver_flags_t;
J-Alves5aecd982020-06-11 10:25:33 +010030
J-Alves7581c382020-05-07 18:34:20 +010031#ifndef __ASSEMBLY__
32
33#include <stdint.h>
34
/* Pair of RX/TX buffer pointers used as an FF-A mailbox. */
struct mailbox_buffers {
	void *recv;	/* RX buffer: written by the producer, read locally. */
	void *send;	/* TX buffer: filled locally before sending. */
};
39
/*
 * Declares a page-aligned, static pair of RX/TX buffers of `buffers_size`
 * bytes each and stores their addresses into `mb_name` (a struct
 * mailbox_buffers). Because the storage is `static`, expanding this macro
 * twice in the same scope would clash; use it once per function.
 */
#define CONFIGURE_MAILBOX(mb_name, buffers_size) 			\
	do {								\
		/* Declare RX/TX buffers at virtual FF-A instance */	\
		static struct {						\
			uint8_t rx[buffers_size];			\
			uint8_t tx[buffers_size];			\
		} __aligned(PAGE_SIZE) mb_buffers;			\
		mb_name.recv = (void *)mb_buffers.rx;			\
		mb_name.send = (void *)mb_buffers.tx;			\
	} while (false)
50
/*
 * Declares the mailbox buffers (see CONFIGURE_MAILBOX) and registers them
 * with the SPM via FFA_RXTX_MAP. `smc_ret` receives the raw SMC result of
 * ffa_rxtx_map(); callers must check it for FFA errors.
 */
#define CONFIGURE_AND_MAP_MAILBOX(mb_name, buffers_size, smc_ret)	\
	do {								\
		CONFIGURE_MAILBOX(mb_name, buffers_size);		\
		smc_ret = ffa_rxtx_map(					\
				(uintptr_t)mb_name.send,		\
				(uintptr_t)mb_name.recv,		\
				buffers_size / PAGE_SIZE		\
			);						\
	} while (false)
60
/*
 * Partition information descriptor as returned through the RX buffer by
 * FFA_PARTITION_INFO_GET.
 */
struct ffa_partition_info {
	/** The ID of the VM the information is about */
	ffa_vm_id_t id;
	/** The number of execution contexts implemented by the partition */
	uint16_t exec_context;
	/** The Partition's properties, e.g. supported messaging methods */
	uint32_t properties;
};
69
/*
 * 2-bit field encodings for memory region properties, as used in the packed
 * permission/attribute bytes defined below.
 */
enum ffa_data_access {
	FFA_DATA_ACCESS_NOT_SPECIFIED,
	FFA_DATA_ACCESS_RO,
	FFA_DATA_ACCESS_RW,
	FFA_DATA_ACCESS_RESERVED,
};

enum ffa_instruction_access {
	FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
	FFA_INSTRUCTION_ACCESS_NX,
	FFA_INSTRUCTION_ACCESS_X,
	FFA_INSTRUCTION_ACCESS_RESERVED,
};

enum ffa_memory_type {
	FFA_MEMORY_NOT_SPECIFIED_MEM,
	FFA_MEMORY_DEVICE_MEM,
	FFA_MEMORY_NORMAL_MEM,
};

/*
 * Note: a single enum covers both normal-memory cacheability and
 * device-memory attributes; the FFA_MEMORY_DEV_* values share the numeric
 * range 0-3 with the cache encodings since both occupy the same 2-bit field,
 * interpreted according to ffa_memory_type.
 */
enum ffa_memory_cacheability {
	FFA_MEMORY_CACHE_RESERVED = 0x0,
	FFA_MEMORY_CACHE_NON_CACHEABLE = 0x1,
	FFA_MEMORY_CACHE_RESERVED_1 = 0x2,
	FFA_MEMORY_CACHE_WRITE_BACK = 0x3,
	FFA_MEMORY_DEV_NGNRNE = 0x0,
	FFA_MEMORY_DEV_NGNRE = 0x1,
	FFA_MEMORY_DEV_NGRE = 0x2,
	FFA_MEMORY_DEV_GRE = 0x3,
};

enum ffa_memory_shareability {
	FFA_MEMORY_SHARE_NON_SHAREABLE,
	FFA_MEMORY_SHARE_RESERVED,
	FFA_MEMORY_OUTER_SHAREABLE,
	FFA_MEMORY_INNER_SHAREABLE,
};

/* Packed byte holding the data/instruction access 2-bit fields. */
typedef uint8_t ffa_memory_access_permissions_t;

/**
 * This corresponds to table "Memory region attributes descriptor" of the FF-A
 * 1.0 specification.
 */
typedef uint8_t ffa_memory_attributes_t;

/* Bit offset and mask of each 2-bit field within the packed bytes above. */
#define FFA_DATA_ACCESS_OFFSET (0x0U)
#define FFA_DATA_ACCESS_MASK ((0x3U) << FFA_DATA_ACCESS_OFFSET)

#define FFA_INSTRUCTION_ACCESS_OFFSET (0x2U)
#define FFA_INSTRUCTION_ACCESS_MASK ((0x3U) << FFA_INSTRUCTION_ACCESS_OFFSET)

#define FFA_MEMORY_TYPE_OFFSET (0x4U)
#define FFA_MEMORY_TYPE_MASK ((0x3U) << FFA_MEMORY_TYPE_OFFSET)

#define FFA_MEMORY_CACHEABILITY_OFFSET (0x2U)
#define FFA_MEMORY_CACHEABILITY_MASK ((0x3U) << FFA_MEMORY_CACHEABILITY_OFFSET)

#define FFA_MEMORY_SHAREABILITY_OFFSET (0x0U)
#define FFA_MEMORY_SHAREABILITY_MASK ((0x3U) << FFA_MEMORY_SHAREABILITY_OFFSET)
130
/*
 * Generates a `static inline` setter that writes `perm` into the 2-bit field
 * described by (offset, mask) within *attr, leaving the other bits intact.
 * All macro parameters are parenthesised in the expansion so that any
 * expression argument expands safely.
 */
#define ATTR_FUNCTION_SET(name, container_type, offset, mask) \
	static inline void ffa_set_##name##_attr(container_type *attr, \
						 const enum ffa_##name perm) \
	{ \
		*attr = (*attr & ~(mask)) | (((perm) << (offset)) & (mask)); \
	}
137
/*
 * Generates a `static inline` getter that extracts the 2-bit field described
 * by (offset, mask) from `attr` and returns it as the matching enum type.
 * All macro parameters are parenthesised in the expansion so that any
 * expression argument expands safely.
 */
#define ATTR_FUNCTION_GET(name, container_type, offset, mask) \
	static inline enum ffa_##name ffa_get_##name##_attr( \
		container_type attr) \
	{ \
		return (enum ffa_##name)(((attr) & (mask)) >> (offset)); \
	}
144
/*
 * Instantiate the typed getter/setter pairs for each attribute field:
 * ffa_{set,get}_data_access_attr, ffa_{set,get}_instruction_access_attr,
 * ffa_{set,get}_memory_type_attr, ffa_{set,get}_memory_cacheability_attr
 * and ffa_{set,get}_memory_shareability_attr.
 */
ATTR_FUNCTION_SET(data_access, ffa_memory_access_permissions_t,
		  FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
ATTR_FUNCTION_GET(data_access, ffa_memory_access_permissions_t,
		  FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)

ATTR_FUNCTION_SET(instruction_access, ffa_memory_access_permissions_t,
		  FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
ATTR_FUNCTION_GET(instruction_access, ffa_memory_access_permissions_t,
		  FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)

ATTR_FUNCTION_SET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
		  FFA_MEMORY_TYPE_MASK)
ATTR_FUNCTION_GET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
		  FFA_MEMORY_TYPE_MASK)

ATTR_FUNCTION_SET(memory_cacheability, ffa_memory_attributes_t,
		  FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
ATTR_FUNCTION_GET(memory_cacheability, ffa_memory_attributes_t,
		  FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)

ATTR_FUNCTION_SET(memory_shareability, ffa_memory_attributes_t,
		  FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
ATTR_FUNCTION_GET(memory_shareability, ffa_memory_attributes_t,
		  FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)

/*
 * Bit 63 of a memory handle identifies the allocator. The mask and the
 * "hypervisor allocated" value are deliberately the same bit: a set bit 63
 * means the handle was allocated by the hypervisor.
 */
#define FFA_MEMORY_HANDLE_ALLOCATOR_MASK \
	((ffa_memory_handle_t)(UINT64_C(1) << 63))
#define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR \
	((ffa_memory_handle_t)(UINT64_C(1) << 63))
#define FFA_MEMORY_HANDLE_INVALID (~UINT64_C(0))
175
176/**
177 * A set of contiguous pages which is part of a memory region. This corresponds
178 * to table "Constituent memory region descriptor" of the FFA 1.0 specification.
179 */
/**
 * A set of contiguous pages which is part of a memory region. This corresponds
 * to table "Constituent memory region descriptor" of the FFA 1.0 specification.
 */
struct ffa_memory_region_constituent {
	/**
	 * The base IPA of the constituent memory region, aligned to 4 kiB page
	 * size granularity.
	 */
	void *address;
	/** The number of 4 kiB pages in the constituent memory region. */
	uint32_t page_count;
	/** Reserved field, must be 0. */
	uint32_t reserved;
};
191
192/**
193 * A set of pages comprising a memory region. This corresponds to table
194 * "Composite memory region descriptor" of the FFA 1.0 specification.
195 */
/**
 * A set of pages comprising a memory region. This corresponds to table
 * "Composite memory region descriptor" of the FFA 1.0 specification.
 */
struct ffa_composite_memory_region {
	/**
	 * The total number of 4 kiB pages included in this memory region. This
	 * must be equal to the sum of page counts specified in each
	 * `ffa_memory_region_constituent`.
	 */
	uint32_t page_count;
	/**
	 * The number of constituents (`ffa_memory_region_constituent`)
	 * included in this memory region range.
	 */
	uint32_t constituent_count;
	/** Reserved field, must be 0. */
	uint64_t reserved_0;
	/** An array of `constituent_count` memory region constituents. */
	struct ffa_memory_region_constituent constituents[];
};
213
214/**
215 * This corresponds to table "Memory access permissions descriptor" of the FFA
216 * 1.0 specification.
217 */
/**
 * This corresponds to table "Memory access permissions descriptor" of the FFA
 * 1.0 specification.
 */
struct ffa_memory_region_attributes {
	/** The ID of the VM to which the memory is being given or shared. */
	ffa_vm_id_t receiver;
	/**
	 * The permissions with which the memory region should be mapped in the
	 * receiver's page table.
	 */
	ffa_memory_access_permissions_t permissions;
	/**
	 * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
	 * for memory regions with multiple borrowers.
	 */
	ffa_memory_receiver_flags_t flags;
};
232
/** Flags to control the behaviour of a memory sharing transaction. */
typedef uint32_t ffa_memory_region_flags_t;

/**
 * Clear memory region contents after unmapping it from the sender and before
 * mapping it for any receiver.
 */
#define FFA_MEMORY_REGION_FLAG_CLEAR 0x1U

/**
 * Whether the hypervisor may time slice the memory sharing or retrieval
 * operation.
 */
#define FFA_MEMORY_REGION_FLAG_TIME_SLICE 0x2U

/**
 * Whether the hypervisor should clear the memory region after the receiver
 * relinquishes it or is aborted.
 */
#define FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH 0x4U

/* 2-bit transaction-type field occupying bits [4:3] of the flags. */
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK ((0x3U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED ((0x0U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE ((0x1U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND ((0x2U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE ((0x3U) << 3)

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1U
262
J-Alvesf3a393c2020-10-23 16:00:39 +0100263/**
264 * This corresponds to table "Endpoint memory access descriptor" of the FFA 1.0
265 * specification.
266 */
/**
 * This corresponds to table "Endpoint memory access descriptor" of the FFA 1.0
 * specification.
 */
struct ffa_memory_access {
	/** Receiver endpoint ID plus its access permissions and flags. */
	struct ffa_memory_region_attributes receiver_permissions;
	/**
	 * Offset in bytes from the start of the outer `ffa_memory_region` to
	 * an `ffa_composite_memory_region` struct.
	 */
	uint32_t composite_memory_region_offset;
	/** Reserved field, must be 0. */
	uint64_t reserved_0;
};
276
277/**
278 * Information about a set of pages which are being shared. This corresponds to
279 * table "Lend, donate or share memory transaction descriptor" of the FFA
280 * 1.0 specification. Note that it is also used for retrieve requests and
281 * responses.
282 */
/**
 * Information about a set of pages which are being shared. This corresponds to
 * table "Lend, donate or share memory transaction descriptor" of the FFA
 * 1.0 specification. Note that it is also used for retrieve requests and
 * responses.
 */
struct ffa_memory_region {
	/**
	 * The ID of the VM which originally sent the memory region, i.e. the
	 * owner.
	 */
	ffa_vm_id_t sender;
	/** Packed memory type/cacheability/shareability attributes. */
	ffa_memory_attributes_t attributes;
	/** Reserved field, must be 0. */
	uint8_t reserved_0;
	/** Flags to control behaviour of the transaction. */
	ffa_memory_region_flags_t flags;
	/** Handle identifying this transaction (assigned by the allocator). */
	ffa_memory_handle_t handle;
	/**
	 * An implementation defined value associated with the receiver and the
	 * memory region.
	 */
	uint64_t tag;
	/** Reserved field, must be 0. */
	uint32_t reserved_1;
	/**
	 * The number of `ffa_memory_access` entries included in this
	 * transaction.
	 */
	uint32_t receiver_count;
	/**
	 * An array of `receiver_count` endpoint memory access descriptors.
	 * Each one specifies a memory region offset, an endpoint and the
	 * attributes with which this memory region should be mapped in that
	 * endpoint's page table.
	 */
	struct ffa_memory_access receivers[];
};
315
316/**
317 * Descriptor used for FFA_MEM_RELINQUISH requests. This corresponds to table
318 * "Descriptor to relinquish a memory region" of the FFA 1.0 specification.
319 */
/**
 * Descriptor used for FFA_MEM_RELINQUISH requests. This corresponds to table
 * "Descriptor to relinquish a memory region" of the FFA 1.0 specification.
 */
struct ffa_mem_relinquish {
	/** Handle of the memory transaction being relinquished. */
	ffa_memory_handle_t handle;
	/** Flags controlling the relinquish operation. */
	ffa_memory_region_flags_t flags;
	/** Number of entries in the `endpoints` array below. */
	uint32_t endpoint_count;
	/** IDs of the endpoints relinquishing access to the region. */
	ffa_vm_id_t endpoints[];
};
326
327static inline ffa_memory_handle_t ffa_assemble_handle(uint32_t h1, uint32_t h2)
328{
329 return (uint64_t)h1 | (uint64_t)h2 << 32;
330}
331
/*
 * Builds the 64-bit memory handle from the ret2/ret3 fields of an SMC
 * result — per its name, the FFA_MEM_* success-return convention
 * (ret3:ret2 = handle); confirm against ffa_svc.h.
 */
static inline ffa_memory_handle_t ffa_mem_success_handle(smc_ret_values r)
{
	return ffa_assemble_handle(r.ret2, r.ret3);
}
336
337/**
338 * Gets the `ffa_composite_memory_region` for the given receiver from an
339 * `ffa_memory_region`, or NULL if it is not valid.
340 */
341static inline struct ffa_composite_memory_region *
342ffa_memory_region_get_composite(struct ffa_memory_region *memory_region,
343 uint32_t receiver_index)
344{
345 uint32_t offset = memory_region->receivers[receiver_index]
346 .composite_memory_region_offset;
347
348 if (offset == 0) {
349 return NULL;
350 }
351
352 return (struct ffa_composite_memory_region *)((uint8_t *)memory_region +
353 offset);
354}
355
356static inline uint32_t ffa_mem_relinquish_init(
357 struct ffa_mem_relinquish *relinquish_request,
358 ffa_memory_handle_t handle, ffa_memory_region_flags_t flags,
359 ffa_vm_id_t sender)
360{
361 relinquish_request->handle = handle;
362 relinquish_request->flags = flags;
363 relinquish_request->endpoint_count = 1;
364 relinquish_request->endpoints[0] = sender;
365 return sizeof(struct ffa_mem_relinquish) + sizeof(ffa_vm_id_t);
366}
367
/*
 * Descriptor-construction helpers, implemented in the matching .c file.
 * Both write into `memory_region` and return a uint32_t length/status —
 * see their definitions for the exact return semantics.
 */
uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability);

uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length);

bool check_spmc_execution_level(void);

/* Direct-messaging wrappers; each returns the raw SMC return registers. */
smc_ret_values ffa_msg_send_direct_req(uint32_t source_id, uint32_t dest_id, uint32_t message);
smc_ret_values ffa_msg_send_direct_req64(uint32_t source_id, uint32_t dest_id, uint64_t message);
smc_ret_values ffa_msg_send_direct_req64_5args(uint32_t source_id, uint32_t dest_id,
					       uint64_t arg0, uint64_t arg1,
					       uint64_t arg2, uint64_t arg3,
					       uint64_t arg4);

/* Setup/info and scheduling wrappers. */
smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id);
smc_ret_values ffa_version(uint32_t input_version);
smc_ret_values ffa_id_get(void);
smc_ret_values ffa_msg_wait(void);
smc_ret_values ffa_msg_send_direct_resp(ffa_vm_id_t source_id,
					ffa_vm_id_t dest_id, uint32_t message);
smc_ret_values ffa_error(int32_t error_code);
smc_ret_values ffa_features(uint32_t feature);
smc_ret_values ffa_partition_info_get(const uint32_t uuid[4]);
smc_ret_values ffa_rx_release(void);
smc_ret_values ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages);

/* Memory management wrappers (descriptor already placed in the TX buffer). */
smc_ret_values ffa_mem_donate(uint32_t descriptor_length,
			      uint32_t fragment_length);
smc_ret_values ffa_mem_lend(uint32_t descriptor_length,
			    uint32_t fragment_length);
smc_ret_values ffa_mem_share(uint32_t descriptor_length,
			     uint32_t fragment_length);
smc_ret_values ffa_mem_retrieve_req(uint32_t descriptor_length,
				    uint32_t fragment_length);
smc_ret_values ffa_mem_relinquish(void);
smc_ret_values ffa_mem_reclaim(uint64_t handle, uint32_t flags);
417
J-Alves7581c382020-05-07 18:34:20 +0100418#endif /* __ASSEMBLY__ */
419
420#endif /* FFA_HELPERS_H */