blob: e96787ee33c2885b444432f7d5232fdc911fc85a [file] [log] [blame]
J-Alves7581c382020-05-07 18:34:20 +01001/*
2 * Copyright (c) 2018-2020, Arm Limited. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7#ifndef FFA_HELPERS_H
8#define FFA_HELPERS_H
9
J-Alves8f08a052020-05-26 17:14:40 +010010#include <ffa_svc.h>
J-Alves7581c382020-05-07 18:34:20 +010011#include <tftf_lib.h>
12#include <utils_def.h>
13
/*
 * TFTF-private error code; it must be different to the ones used by FFA.
 * Parenthesised so the negative literal expands safely inside larger
 * expressions (e.g. `a - FFA_TFTF_ERROR`).
 */
#define FFA_TFTF_ERROR (-42)

/* Hypervisor ID at physical FFA instance */
#define HYP_ID (0)

/* By convention, SP IDs (as opposed to VM IDs) have bit 15 set */
#define SP_ID(x) ((x) | (1 << 15))

/*
 * 16-bit identifier/count types used by the FF-A ABI.
 *
 * NOTE(review): these typedefs (and struct ffa_uuid below) appear before the
 * #ifndef __ASSEMBLY__ guard — confirm this header is never included from
 * assembly sources, or move them inside the guard.
 */
typedef unsigned short ffa_vm_id_t;
typedef unsigned short ffa_vm_count_t;
typedef unsigned short ffa_vcpu_count_t;
/* Identifier of an interrupt. */
typedef uint32_t ffa_int_id_t;
/* Globally unique handle naming a memory sharing transaction. */
typedef uint64_t ffa_memory_handle_t;
/** Flags to indicate properties of receivers during memory region retrieval. */
typedef uint8_t ffa_memory_receiver_flags_t;
J-Alves5aecd982020-06-11 10:25:33 +010030
/* UUID identifying an FF-A partition, carried as four 32-bit words. */
struct ffa_uuid {
	/*
	 * NOTE(review): the const qualifier makes whole-struct assignment
	 * impossible; instances must be built with an initialiser list.
	 */
	const uint32_t uuid[4];
};
34
J-Alves7581c382020-05-07 18:34:20 +010035#ifndef __ASSEMBLY__
36
37#include <stdint.h>
38
/*
 * RX/TX buffer pair used for FF-A messaging; typically set up and mapped
 * via CONFIGURE_AND_MAP_MAILBOX()/ffa_rxtx_map() below.
 */
struct mailbox_buffers {
	void *recv; /* RX buffer (receive side). */
	void *send; /* TX buffer (transmit side). */
};
43
/*
 * Statically allocates a PAGE_SIZE-aligned RX/TX buffer pair of
 * 'buffers_size' bytes each and stores their addresses in
 * mb_name.recv / mb_name.send.
 *
 * NOTE(review): the internal identifier 'mb_buffers' is fixed, so this
 * macro can only be expanded once per scope — confirm no caller uses it
 * twice in the same function.
 */
#define CONFIGURE_MAILBOX(mb_name, buffers_size) \
	do { \
	/* Declare RX/TX buffers at virtual FF-A instance */ \
	static struct { \
		uint8_t rx[buffers_size]; \
		uint8_t tx[buffers_size]; \
	} __aligned(PAGE_SIZE) mb_buffers; \
	mb_name.recv = (void *)mb_buffers.rx; \
	mb_name.send = (void *)mb_buffers.tx; \
	} while (false)
54
/*
 * Declares the RX/TX buffer pair (see CONFIGURE_MAILBOX) and immediately
 * maps it with FFA_RXTX_MAP; the SMC result is assigned to 'smc_ret'.
 * 'buffers_size / PAGE_SIZE' truncates, so pass a multiple of PAGE_SIZE.
 */
#define CONFIGURE_AND_MAP_MAILBOX(mb_name, buffers_size, smc_ret) \
	do { \
	CONFIGURE_MAILBOX(mb_name, buffers_size); \
	smc_ret = ffa_rxtx_map( \
			(uintptr_t)mb_name.send, \
			(uintptr_t)mb_name.recv, \
			buffers_size / PAGE_SIZE \
		); \
	} while (false)
64
/*
 * Partition information descriptor — presumably one entry per partition as
 * returned by ffa_partition_info_get() (implementation elsewhere; confirm).
 */
struct ffa_partition_info {
	/** The ID of the VM the information is about */
	ffa_vm_id_t id;
	/** The number of execution contexts implemented by the partition */
	uint16_t exec_context;
	/** The Partition's properties, e.g. supported messaging methods */
	uint32_t properties;
};
73
/*
 * Data access permission for a shared memory region; stored in the 2-bit
 * data-access field of ffa_memory_access_permissions_t (see masks below).
 */
enum ffa_data_access {
	FFA_DATA_ACCESS_NOT_SPECIFIED,
	FFA_DATA_ACCESS_RO,
	FFA_DATA_ACCESS_RW,
	FFA_DATA_ACCESS_RESERVED,
};
80
/*
 * Instruction access permission (NX = not executable, X = executable);
 * stored in the 2-bit instruction-access field of
 * ffa_memory_access_permissions_t (see masks below).
 */
enum ffa_instruction_access {
	FFA_INSTRUCTION_ACCESS_NOT_SPECIFIED,
	FFA_INSTRUCTION_ACCESS_NX,
	FFA_INSTRUCTION_ACCESS_X,
	FFA_INSTRUCTION_ACCESS_RESERVED,
};
87
/* Memory type attribute (device vs. normal memory) of a shared region. */
enum ffa_memory_type {
	FFA_MEMORY_NOT_SPECIFIED_MEM,
	FFA_MEMORY_DEVICE_MEM,
	FFA_MEMORY_NORMAL_MEM,
};
93
/*
 * Cacheability (for normal memory) or device-memory attribute: both
 * interpretations share the same 2-bit field, which is why the
 * FFA_MEMORY_CACHE_* and FFA_MEMORY_DEV_* enumerators overlap numerically.
 */
enum ffa_memory_cacheability {
	FFA_MEMORY_CACHE_RESERVED = 0x0,
	FFA_MEMORY_CACHE_NON_CACHEABLE = 0x1,
	FFA_MEMORY_CACHE_RESERVED_1 = 0x2,
	FFA_MEMORY_CACHE_WRITE_BACK = 0x3,
	FFA_MEMORY_DEV_NGNRNE = 0x0,
	FFA_MEMORY_DEV_NGNRE = 0x1,
	FFA_MEMORY_DEV_NGRE = 0x2,
	FFA_MEMORY_DEV_GRE = 0x3,
};
104
/* Shareability attribute of a shared region (2-bit field, see masks below). */
enum ffa_memory_shareability {
	FFA_MEMORY_SHARE_NON_SHAREABLE,
	FFA_MEMORY_SHARE_RESERVED,
	FFA_MEMORY_OUTER_SHAREABLE,
	FFA_MEMORY_INNER_SHAREABLE,
};
111
/*
 * Packed byte holding the data-access (bits [1:0]) and instruction-access
 * (bits [3:2]) permissions — see the accessor generators below.
 */
typedef uint8_t ffa_memory_access_permissions_t;

/**
 * This corresponds to table "Memory region attributes descriptor" of the FF-A
 * 1.0 specification. Holds shareability (bits [1:0]), cacheability
 * (bits [3:2]) and memory type (bits [5:4]).
 */
typedef uint8_t ffa_memory_attributes_t;

/* Fields within ffa_memory_access_permissions_t (each 2 bits wide). */
#define FFA_DATA_ACCESS_OFFSET (0x0U)
#define FFA_DATA_ACCESS_MASK ((0x3U) << FFA_DATA_ACCESS_OFFSET)

#define FFA_INSTRUCTION_ACCESS_OFFSET (0x2U)
#define FFA_INSTRUCTION_ACCESS_MASK ((0x3U) << FFA_INSTRUCTION_ACCESS_OFFSET)

/* Fields within ffa_memory_attributes_t (each 2 bits wide). */
#define FFA_MEMORY_TYPE_OFFSET (0x4U)
#define FFA_MEMORY_TYPE_MASK ((0x3U) << FFA_MEMORY_TYPE_OFFSET)

#define FFA_MEMORY_CACHEABILITY_OFFSET (0x2U)
#define FFA_MEMORY_CACHEABILITY_MASK ((0x3U) << FFA_MEMORY_CACHEABILITY_OFFSET)

#define FFA_MEMORY_SHAREABILITY_OFFSET (0x0U)
#define FFA_MEMORY_SHAREABILITY_MASK ((0x3U) << FFA_MEMORY_SHAREABILITY_OFFSET)
134
/*
 * Generates a static inline setter, ffa_set_<name>_attr(), that writes
 * 'perm' into the (offset, mask) bit-field of *attr while leaving the
 * remaining bits untouched.
 */
#define ATTR_FUNCTION_SET(name, container_type, offset, mask) \
	static inline void ffa_set_##name##_attr(container_type *attr, \
					const enum ffa_##name perm) \
	{ \
		*attr = (*attr & ~(mask)) | ((perm << offset) & mask); \
	}
141
/*
 * Generates a static inline getter, ffa_get_<name>_attr(), that extracts
 * the (offset, mask) bit-field of 'attr' as the matching enum type.
 */
#define ATTR_FUNCTION_GET(name, container_type, offset, mask) \
	static inline enum ffa_##name ffa_get_##name##_attr( \
					container_type attr) \
	{ \
		return (enum ffa_##name)((attr & mask) >> offset); \
	}
148
/*
 * Instantiate the bit-field accessors:
 *  - data/instruction access operate on ffa_memory_access_permissions_t;
 *  - type/cacheability/shareability operate on ffa_memory_attributes_t.
 */
ATTR_FUNCTION_SET(data_access, ffa_memory_access_permissions_t,
		  FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)
ATTR_FUNCTION_GET(data_access, ffa_memory_access_permissions_t,
		  FFA_DATA_ACCESS_OFFSET, FFA_DATA_ACCESS_MASK)

ATTR_FUNCTION_SET(instruction_access, ffa_memory_access_permissions_t,
		  FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)
ATTR_FUNCTION_GET(instruction_access, ffa_memory_access_permissions_t,
		  FFA_INSTRUCTION_ACCESS_OFFSET, FFA_INSTRUCTION_ACCESS_MASK)

ATTR_FUNCTION_SET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
		  FFA_MEMORY_TYPE_MASK)
ATTR_FUNCTION_GET(memory_type, ffa_memory_attributes_t, FFA_MEMORY_TYPE_OFFSET,
		  FFA_MEMORY_TYPE_MASK)

ATTR_FUNCTION_SET(memory_cacheability, ffa_memory_attributes_t,
		  FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)
ATTR_FUNCTION_GET(memory_cacheability, ffa_memory_attributes_t,
		  FFA_MEMORY_CACHEABILITY_OFFSET, FFA_MEMORY_CACHEABILITY_MASK)

ATTR_FUNCTION_SET(memory_shareability, ffa_memory_attributes_t,
		  FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
ATTR_FUNCTION_GET(memory_shareability, ffa_memory_attributes_t,
		  FFA_MEMORY_SHAREABILITY_OFFSET, FFA_MEMORY_SHAREABILITY_MASK)
173
/*
 * Bit 63 of a memory handle records which component allocated it; the
 * hypervisor-allocated encoding is bit 63 set. An all-ones handle is
 * the invalid/sentinel value.
 */
#define FFA_MEMORY_HANDLE_ALLOCATOR_MASK \
	((ffa_memory_handle_t)(UINT64_C(1) << 63))
#define FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR \
	((ffa_memory_handle_t)(UINT64_C(1) << 63))
#define FFA_MEMORY_HANDLE_INVALID (~UINT64_C(0))
179
/**
 * A set of contiguous pages which is part of a memory region. This corresponds
 * to table "Constituent memory region descriptor" of the FFA 1.0 specification.
 * Entries of the constituents[] flexible array in
 * `ffa_composite_memory_region`.
 */
struct ffa_memory_region_constituent {
	/**
	 * The base IPA of the constituent memory region, aligned to 4 kiB page
	 * size granularity.
	 */
	void *address;
	/** The number of 4 kiB pages in the constituent memory region. */
	uint32_t page_count;
	/** Reserved field, must be 0. */
	uint32_t reserved;
};
195
/**
 * A set of pages comprising a memory region. This corresponds to table
 * "Composite memory region descriptor" of the FFA 1.0 specification.
 */
struct ffa_composite_memory_region {
	/**
	 * The total number of 4 kiB pages included in this memory region. This
	 * must be equal to the sum of page counts specified in each
	 * `ffa_memory_region_constituent`.
	 */
	uint32_t page_count;
	/**
	 * The number of constituents (`ffa_memory_region_constituent`)
	 * included in this memory region range.
	 */
	uint32_t constituent_count;
	/** Reserved field, must be 0. */
	uint64_t reserved_0;
	/**
	 * An array of `constituent_count` memory region constituents.
	 * Flexible array member: the enclosing buffer must provide
	 * constituent_count * sizeof(constituents[0]) extra bytes.
	 */
	struct ffa_memory_region_constituent constituents[];
};
217
/**
 * This corresponds to table "Memory access permissions descriptor" of the FFA
 * 1.0 specification. Embedded in `ffa_memory_access`.
 */
struct ffa_memory_region_attributes {
	/** The ID of the VM to which the memory is being given or shared. */
	ffa_vm_id_t receiver;
	/**
	 * The permissions with which the memory region should be mapped in the
	 * receiver's page table (data/instruction access bit-fields; see the
	 * ffa_set/get_*_access_attr helpers).
	 */
	ffa_memory_access_permissions_t permissions;
	/**
	 * Flags used during FFA_MEM_RETRIEVE_REQ and FFA_MEM_RETRIEVE_RESP
	 * for memory regions with multiple borrowers.
	 */
	ffa_memory_receiver_flags_t flags;
};
236
/** Flags to control the behaviour of a memory sharing transaction. */
typedef uint32_t ffa_memory_region_flags_t;

/**
 * Clear memory region contents after unmapping it from the sender and before
 * mapping it for any receiver.
 */
#define FFA_MEMORY_REGION_FLAG_CLEAR 0x1U

/**
 * Whether the hypervisor may time slice the memory sharing or retrieval
 * operation.
 */
#define FFA_MEMORY_REGION_FLAG_TIME_SLICE 0x2U

/**
 * Whether the hypervisor should clear the memory region after the receiver
 * relinquishes it or is aborted.
 */
#define FFA_MEMORY_REGION_FLAG_CLEAR_RELINQUISH 0x4U

/* Transaction type: a 2-bit field at bits [4:3] of the flags. */
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK ((0x3U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED ((0x0U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE ((0x1U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND ((0x2U) << 3)
#define FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE ((0x3U) << 3)

/** The maximum number of recipients a memory region may be sent to. */
#define MAX_MEM_SHARE_RECIPIENTS 1U
266
/**
 * This corresponds to table "Endpoint memory access descriptor" of the FFA 1.0
 * specification. One entry per receiver in `ffa_memory_region.receivers[]`.
 */
struct ffa_memory_access {
	struct ffa_memory_region_attributes receiver_permissions;
	/**
	 * Offset in bytes from the start of the outer `ffa_memory_region` to
	 * an `ffa_composite_memory_region` struct. Zero means "not present"
	 * (see ffa_memory_region_get_composite()).
	 */
	uint32_t composite_memory_region_offset;
	/* Reserved — presumably must be 0, like the other reserved fields. */
	uint64_t reserved_0;
};
280
/**
 * Information about a set of pages which are being shared. This corresponds to
 * table "Lend, donate or share memory transaction descriptor" of the FFA
 * 1.0 specification. Note that it is also used for retrieve requests and
 * responses.
 */
struct ffa_memory_region {
	/**
	 * The ID of the VM which originally sent the memory region, i.e. the
	 * owner.
	 */
	ffa_vm_id_t sender;
	ffa_memory_attributes_t attributes;
	/** Reserved field, must be 0. */
	uint8_t reserved_0;
	/** Flags to control behaviour of the transaction. */
	ffa_memory_region_flags_t flags;
	ffa_memory_handle_t handle;
	/**
	 * An implementation defined value associated with the receiver and the
	 * memory region.
	 */
	uint64_t tag;
	/** Reserved field, must be 0. */
	uint32_t reserved_1;
	/**
	 * The number of `ffa_memory_access` entries included in this
	 * transaction.
	 */
	uint32_t receiver_count;
	/**
	 * An array of `receiver_count` endpoint memory access descriptors.
	 * Each one specifies a memory region offset, an endpoint and the
	 * attributes with which this memory region should be mapped in that
	 * endpoint's page table.
	 */
	struct ffa_memory_access receivers[];
};
319
/**
 * Descriptor used for FFA_MEM_RELINQUISH requests. This corresponds to table
 * "Descriptor to relinquish a memory region" of the FFA 1.0 specification.
 */
struct ffa_mem_relinquish {
	/** Handle of the memory transaction being relinquished. */
	ffa_memory_handle_t handle;
	/** Transaction flags (FFA_MEMORY_REGION_FLAG_*). */
	ffa_memory_region_flags_t flags;
	/** Number of entries in `endpoints`. */
	uint32_t endpoint_count;
	/** IDs of the endpoints relinquishing access (flexible array). */
	ffa_vm_id_t endpoints[];
};
330
331static inline ffa_memory_handle_t ffa_assemble_handle(uint32_t h1, uint32_t h2)
332{
333 return (uint64_t)h1 | (uint64_t)h2 << 32;
334}
335
336static inline ffa_memory_handle_t ffa_mem_success_handle(smc_ret_values r)
337{
338 return ffa_assemble_handle(r.ret2, r.ret3);
339}
340
341/**
342 * Gets the `ffa_composite_memory_region` for the given receiver from an
343 * `ffa_memory_region`, or NULL if it is not valid.
344 */
345static inline struct ffa_composite_memory_region *
346ffa_memory_region_get_composite(struct ffa_memory_region *memory_region,
347 uint32_t receiver_index)
348{
349 uint32_t offset = memory_region->receivers[receiver_index]
350 .composite_memory_region_offset;
351
352 if (offset == 0) {
353 return NULL;
354 }
355
356 return (struct ffa_composite_memory_region *)((uint8_t *)memory_region +
357 offset);
358}
359
360static inline uint32_t ffa_mem_relinquish_init(
361 struct ffa_mem_relinquish *relinquish_request,
362 ffa_memory_handle_t handle, ffa_memory_region_flags_t flags,
363 ffa_vm_id_t sender)
364{
365 relinquish_request->handle = handle;
366 relinquish_request->flags = flags;
367 relinquish_request->endpoint_count = 1;
368 relinquish_request->endpoints[0] = sender;
369 return sizeof(struct ffa_mem_relinquish) + sizeof(ffa_vm_id_t);
370}
371
/**
 * Builds an FFA_MEM_RETRIEVE_REQ transaction descriptor in 'memory_region'
 * for a single receiver with the given permissions and attributes.
 * Returns a length in bytes — presumably the size of the descriptor
 * written; confirm against the implementation in the corresponding .c file.
 */
uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability);

/**
 * Builds a lend/donate/share transaction descriptor in 'memory_region'
 * (bounded by 'memory_region_max_size') from the given constituents.
 * On return *total_length and *fragment_length describe the descriptor
 * lengths to pass to the FFA_MEM_* SMCs — confirm exact semantics against
 * the implementation.
 */
uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length);
390
/*
 * SMC wrapper prototypes, implemented in the corresponding .c file. Apart
 * from check_spmc_execution_level(), each returns the raw SMC register
 * values in an smc_ret_values structure.
 */
bool check_spmc_execution_level(void);

/* Direct messaging (FFA_MSG_SEND_DIRECT_REQ, 32- and 64-bit variants). */
smc_ret_values ffa_msg_send_direct_req(uint32_t source_id, uint32_t dest_id, uint32_t message);
smc_ret_values ffa_msg_send_direct_req64(uint32_t source_id, uint32_t dest_id, uint64_t message);
smc_ret_values ffa_msg_send_direct_req64_5args(uint32_t source_id, uint32_t dest_id,
					       uint64_t arg0, uint64_t arg1,
					       uint64_t arg2, uint64_t arg3,
					       uint64_t arg4);

/* Setup, discovery and scheduling ABIs. */
smc_ret_values ffa_run(uint32_t dest_id, uint32_t vcpu_id);
smc_ret_values ffa_version(uint32_t input_version);
smc_ret_values ffa_id_get(void);
smc_ret_values ffa_msg_wait(void);
smc_ret_values ffa_msg_send_direct_resp(ffa_vm_id_t source_id,
					ffa_vm_id_t dest_id, uint32_t message);
smc_ret_values ffa_error(int32_t error_code);
smc_ret_values ffa_features(uint32_t feature);
smc_ret_values ffa_partition_info_get(const uint32_t uuid[4]);
smc_ret_values ffa_rx_release(void);
smc_ret_values ffa_rxtx_map(uintptr_t send, uintptr_t recv, uint32_t pages);

/*
 * Memory management ABIs. The descriptor referred to by
 * descriptor_length/fragment_length is expected in the caller's TX buffer.
 */
smc_ret_values ffa_mem_donate(uint32_t descriptor_length,
			      uint32_t fragment_length);
smc_ret_values ffa_mem_lend(uint32_t descriptor_length,
			    uint32_t fragment_length);
smc_ret_values ffa_mem_share(uint32_t descriptor_length,
			     uint32_t fragment_length);
smc_ret_values ffa_mem_retrieve_req(uint32_t descriptor_length,
				    uint32_t fragment_length);
smc_ret_values ffa_mem_relinquish(void);
smc_ret_values ffa_mem_reclaim(uint64_t handle, uint32_t flags);
421
J-Alves7581c382020-05-07 18:34:20 +0100422#endif /* __ASSEMBLY__ */
423
424#endif /* FFA_HELPERS_H */