/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include <stddef.h>

#include "hf/ffa_v1_0.h"
#include "hf/types.h"

#if defined(__linux__) && defined(__KERNEL__)
#include <linux/kernel.h>
#include <linux/string.h>

#else
#include "hf/static_assert.h"
#include "hf/std.h"
#endif

static_assert(sizeof(struct ffa_endpoint_rx_tx_descriptor) % 16 == 0,
	      "struct ffa_endpoint_rx_tx_descriptor must be a multiple of 16 "
	      "bytes long.");

void ffa_copy_memory_region_constituents(
	struct ffa_memory_region_constituent *dest,
	const struct ffa_memory_region_constituent *src)
{
	dest->address = src->address;
	dest->page_count = src->page_count;
	dest->reserved = 0;
}
37
Andrew Walbranb5ab43c2020-04-30 11:32:54 +010038/**
J-Alvesb44bb7d2022-04-25 15:43:46 +010039 * Initializes receiver permissions, in a memory transaction descriptor.
40 */
41void ffa_memory_access_init_permissions(
42 struct ffa_memory_access *receiver, ffa_vm_id_t receiver_id,
43 enum ffa_data_access data_access,
44 enum ffa_instruction_access instruction_access,
45 ffa_memory_receiver_flags_t flags)
46{
47 ffa_memory_access_permissions_t permissions = 0;
48
49 /* Set memory region's permissions. */
50 ffa_set_data_access_attr(&permissions, data_access);
51 ffa_set_instruction_access_attr(&permissions, instruction_access);
52
53 receiver->receiver_permissions.receiver = receiver_id;
54 receiver->receiver_permissions.permissions = permissions;
55 receiver->receiver_permissions.flags = flags;
56
57 receiver->reserved_0 = 0ULL;
58}
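
/*
 * Illustrative sketch (not part of the original file): preparing one receiver
 * entry before building a transaction descriptor. The enum constant names are
 * assumed to match those declared in "hf/ffa.h".
 *
 *	struct ffa_memory_access receiver_access;
 *
 *	ffa_memory_access_init_permissions(&receiver_access, receiver_vm_id,
 *					   FFA_DATA_ACCESS_RW,
 *					   FFA_INSTRUCTION_ACCESS_NX, 0);
 */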

/**
 * Initialises the header of the given `ffa_memory_region`, not
 * including the composite memory region offset.
 */
static void ffa_memory_region_init_header(
	struct ffa_memory_region *memory_region, ffa_vm_id_t sender,
	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
	ffa_memory_handle_t handle, uint32_t tag, uint32_t receiver_count)
{
	memory_region->sender = sender;
	memory_region->attributes = attributes;
	memory_region->flags = flags;
	memory_region->handle = handle;
	memory_region->tag = tag;
	memory_region->memory_access_desc_size =
		sizeof(struct ffa_memory_access);
	memory_region->receiver_count = receiver_count;
	memory_region->receivers_offset =
		offsetof(struct ffa_memory_region, receivers);
#if defined(__linux__) && defined(__KERNEL__)
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
}

/**
 * Copies as many as possible of the given constituents to the memory region
 * and sets each receiver's composite memory region offset.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all constituents.
 */
static uint32_t ffa_memory_region_init_constituents(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t fragment_max_constituents;
	uint32_t constituents_offset;
	uint32_t count_to_copy;
	uint32_t i;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 * If there are multiple receiver endpoints, their respective access
	 * structures should point to the same offset value.
	 */
	for (i = 0U; i < memory_region->receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset =
			sizeof(struct ffa_memory_region) +
			memory_region->receiver_count *
				sizeof(struct ffa_memory_access);
	}

	composite_memory_region =
		ffa_memory_region_get_composite(memory_region, 0);
	composite_memory_region->page_count = 0;
	composite_memory_region->constituent_count = constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		memory_region->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	fragment_max_constituents =
		(memory_region_max_size - constituents_offset) /
		sizeof(struct ffa_memory_region_constituent);

	count_to_copy = constituent_count;
	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0U; i < constituent_count; i++) {
		if (i < count_to_copy) {
			ffa_copy_memory_region_constituents(
				&composite_memory_region->constituents[i],
				&constituents[i]);
		}
		composite_memory_region->page_count +=
			constituents[i].page_count;
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			count_to_copy *
				sizeof(struct ffa_memory_region_constituent);
	}

	return composite_memory_region->constituent_count - count_to_copy;
}
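
/*
 * Illustrative note (not part of the original file): if, say, 600 constituents
 * are requested but only 500 fit in `memory_region_max_size`, the function
 * above copies 500, sets `*fragment_length` to cover those 500, sets
 * `*total_length` to cover all 600, and returns 100. The caller is then
 * expected to send the remaining constituents in follow-up fragments (e.g. via
 * `FFA_MEM_FRAG_TX`), typically prepared with `ffa_memory_fragment_init`
 * below.
 */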

/**
 * Initialises the given `ffa_memory_region` and copies as many as possible of
 * the given constituents to it.
 *
 * Returns the number of constituents remaining which wouldn't fit, and (via
 * return parameters) the size in bytes of the first fragment of data copied to
 * `memory_region` (attributes, constituents and memory region header size),
 * and the total size of the memory sharing message including all constituents.
 */
uint32_t ffa_memory_region_init_single_receiver(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, ffa_vm_id_t receiver,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_memory_access receiver_access;

	ffa_memory_access_init_permissions(&receiver_access, receiver,
					   data_access, instruction_access, 0);

	return ffa_memory_region_init(
		memory_region, memory_region_max_size, sender,
		&receiver_access, 1, constituents, constituent_count, tag,
		flags, type, cacheability, shareability, total_length,
		fragment_length);
}
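
/*
 * Illustrative sketch (not part of the original file; assumes `tx_buffer` is
 * the sender's TX buffer and that the enum constant names match "hf/ffa.h"):
 * sharing a single page with one receiver.
 *
 *	struct ffa_memory_region_constituent constituents[] = {
 *		{.address = (uint64_t)page, .page_count = 1},
 *	};
 *	uint32_t total_length;
 *	uint32_t fragment_length;
 *
 *	ffa_memory_region_init_single_receiver(
 *		tx_buffer, HF_MAILBOX_SIZE, sender_id, receiver_id,
 *		constituents, 1, 0, 0, FFA_DATA_ACCESS_RW,
 *		FFA_INSTRUCTION_ACCESS_NX, FFA_MEMORY_NORMAL_MEM,
 *		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
 *		&total_length, &fragment_length);
 *
 * A single page always fits in the first fragment, so the return value is 0
 * and `total_length == fragment_length`; both are then handed to the memory
 * sharing ABI (e.g. an `ffa_mem_share(total_length, fragment_length)`
 * wrapper).
 */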

uint32_t ffa_memory_region_init(
	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_memory_type type,
	enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, uint32_t *total_length,
	uint32_t *fragment_length)
{
	ffa_memory_attributes_t attributes = 0;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      0, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
	memcpy(memory_region->receivers, receivers,
	       receiver_count * sizeof(struct ffa_memory_access));
#else
	memcpy_s(memory_region->receivers,
		 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
		 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif

	return ffa_memory_region_init_constituents(
		memory_region, memory_region_max_size, constituents,
		constituent_count, total_length, fragment_length);
}
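
/*
 * Illustrative sketch (not part of the original file): sharing with two
 * borrowers at once. Each entry is prepared with
 * `ffa_memory_access_init_permissions` and the whole array is handed over;
 * the destination buffer holds at most `MAX_MEM_SHARE_RECIPIENTS` entries.
 *
 *	struct ffa_memory_access receivers[2];
 *
 *	ffa_memory_access_init_permissions(&receivers[0], first_vm_id,
 *					   FFA_DATA_ACCESS_RW,
 *					   FFA_INSTRUCTION_ACCESS_NX, 0);
 *	ffa_memory_access_init_permissions(&receivers[1], second_vm_id,
 *					   FFA_DATA_ACCESS_RO,
 *					   FFA_INSTRUCTION_ACCESS_NX, 0);
 *	ffa_memory_region_init(tx_buffer, HF_MAILBOX_SIZE, sender_id,
 *			       receivers, 2, constituents, constituent_count,
 *			       0, 0, FFA_MEMORY_NORMAL_MEM,
 *			       FFA_MEMORY_CACHE_WRITE_BACK,
 *			       FFA_MEMORY_INNER_SHAREABLE, &total_length,
 *			       &fragment_length);
 */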

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` by the receiver of a memory transaction.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_retrieve_request_init_single_receiver(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	struct ffa_memory_access receiver_permissions;

	ffa_memory_access_init_permissions(&receiver_permissions, receiver,
					   data_access, instruction_access, 0);

	return ffa_memory_retrieve_request_init(
		memory_region, handle, sender, &receiver_permissions, 1, tag,
		flags, type, cacheability, shareability);
}
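
/*
 * Illustrative sketch (not part of the original file): a receiver that
 * obtained `handle` out of band (e.g. through a direct message) builds a
 * retrieve request in its TX buffer before invoking FFA_MEM_RETRIEVE_REQ:
 *
 *	uint32_t length = ffa_memory_retrieve_request_init_single_receiver(
 *		tx_buffer, handle, sender_id, own_id, 0, 0,
 *		FFA_DATA_ACCESS_RW, FFA_INSTRUCTION_ACCESS_NX,
 *		FFA_MEMORY_NORMAL_MEM, FFA_MEMORY_CACHE_WRITE_BACK,
 *		FFA_MEMORY_INNER_SHAREABLE);
 *
 * The request fits in one fragment, so `length` serves as both the total and
 * the fragment length of the FFA_MEM_RETRIEVE_REQ call.
 */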

uint32_t ffa_memory_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
	uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability)
{
	ffa_memory_attributes_t attributes = 0;
	uint32_t i;

	/* Set memory region's page attributes. */
	ffa_set_memory_type_attr(&attributes, type);
	ffa_set_memory_cacheability_attr(&attributes, cacheability);
	ffa_set_memory_shareability_attr(&attributes, shareability);

	ffa_memory_region_init_header(memory_region, sender, attributes, flags,
				      handle, tag, receiver_count);

#if defined(__linux__) && defined(__KERNEL__)
	memcpy(memory_region->receivers, receivers,
	       receiver_count * sizeof(struct ffa_memory_access));
#else
	memcpy_s(memory_region->receivers,
		 MAX_MEM_SHARE_RECIPIENTS * sizeof(struct ffa_memory_access),
		 receivers, receiver_count * sizeof(struct ffa_memory_access));
#endif

	/* Zero the composite offset for all receivers. */
	for (i = 0U; i < receiver_count; i++) {
		memory_region->receivers[i].composite_memory_region_offset =
			0U;
	}

	return sizeof(struct ffa_memory_region) +
	       memory_region->receiver_count *
		       sizeof(struct ffa_memory_access);
}

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_REQ` from the hypervisor to the TEE.
 *
 * Returns the size of the message written.
 */
uint32_t ffa_memory_lender_retrieve_request_init(
	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
	ffa_vm_id_t sender)
{
	memory_region->sender = sender;
	memory_region->attributes = 0;
	memory_region->flags = 0;
	memory_region->handle = handle;
	memory_region->tag = 0;
	memory_region->receiver_count = 0;
#if defined(__linux__) && defined(__KERNEL__)
	memset(memory_region->reserved, 0, sizeof(memory_region->reserved));
#else
	memset_s(memory_region->reserved, sizeof(memory_region->reserved), 0,
		 sizeof(memory_region->reserved));
#endif
	return sizeof(struct ffa_memory_region);
}
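
/*
 * Illustrative note (an assumption about the surrounding flow, not from the
 * original file): this request names no receivers, so the message is just the
 * bare header and the returned length is `sizeof(struct ffa_memory_region)`.
 * The TEE is then expected to answer with a full transaction description
 * (receivers and constituents) in its FFA_MEM_RETRIEVE_RESP.
 */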

/**
 * Initialises the given `ffa_memory_region` to be used for an
 * `FFA_MEM_RETRIEVE_RESP`, including the given constituents for the first
 * fragment.
 *
 * Returns true on success, or false if the given constituents won't all fit in
 * the first fragment.
 */
bool ffa_retrieved_memory_region_init(
	struct ffa_memory_region *response, size_t response_max_size,
	ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
	ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
	ffa_vm_id_t receiver, ffa_memory_access_permissions_t permissions,
	uint32_t page_count, uint32_t total_constituent_count,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t fragment_constituent_count, uint32_t *total_length,
	uint32_t *fragment_length)
{
	struct ffa_composite_memory_region *composite_memory_region;
	uint32_t i;
	uint32_t constituents_offset;

	ffa_memory_region_init_header(response, sender, attributes, flags,
				      handle, 0, 1);
	/*
	 * Initialised here because memory retrieve responses currently expect
	 * a single borrower to be specified.
	 */
	ffa_memory_access_init_permissions(&response->receivers[0], receiver,
					   0, 0, flags);
	response->receivers[0].receiver_permissions.permissions = permissions;

	/*
	 * Note that `sizeof(struct ffa_memory_region)` and `sizeof(struct
	 * ffa_memory_access)` must both be multiples of 16 (as verified by the
	 * asserts in `ffa_memory.c`), so it is guaranteed that the offset we
	 * calculate here is aligned to a 64-bit boundary and so 64-bit values
	 * can be copied without alignment faults.
	 */
	response->receivers[0].composite_memory_region_offset =
		sizeof(struct ffa_memory_region) +
		response->receiver_count * sizeof(struct ffa_memory_access);

	composite_memory_region = ffa_memory_region_get_composite(response, 0);
	composite_memory_region->page_count = page_count;
	composite_memory_region->constituent_count = total_constituent_count;
	composite_memory_region->reserved_0 = 0;

	constituents_offset =
		response->receivers[0].composite_memory_region_offset +
		sizeof(struct ffa_composite_memory_region);
	if (constituents_offset +
		    fragment_constituent_count *
			    sizeof(struct ffa_memory_region_constituent) >
	    response_max_size) {
		return false;
	}

	for (i = 0; i < fragment_constituent_count; ++i) {
		composite_memory_region->constituents[i] = constituents[i];
	}

	if (total_length != NULL) {
		*total_length =
			constituents_offset +
			composite_memory_region->constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}
	if (fragment_length != NULL) {
		*fragment_length =
			constituents_offset +
			fragment_constituent_count *
				sizeof(struct ffa_memory_region_constituent);
	}

	return true;
}
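
/*
 * Illustrative note (not part of the original file): on success the first
 * fragment of the response spans `*fragment_length` bytes of the receiver's
 * RX buffer. When `fragment_constituent_count` is smaller than
 * `total_constituent_count`, the receiver fetches the remainder with
 * `FFA_MEM_FRAG_RX` until `*total_length` bytes have been received.
 */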

uint32_t ffa_memory_fragment_init(
	struct ffa_memory_region_constituent *fragment,
	size_t fragment_max_size,
	const struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t *fragment_length)
{
	uint32_t fragment_max_constituents =
		fragment_max_size /
		sizeof(struct ffa_memory_region_constituent);
	uint32_t count_to_copy = constituent_count;
	uint32_t i;

	if (count_to_copy > fragment_max_constituents) {
		count_to_copy = fragment_max_constituents;
	}

	for (i = 0; i < count_to_copy; ++i) {
		ffa_copy_memory_region_constituents(&fragment[i],
						    &constituents[i]);
	}

	if (fragment_length != NULL) {
		*fragment_length =
			count_to_copy *
			sizeof(struct ffa_memory_region_constituent);
	}

	return constituent_count - count_to_copy;
}
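
/*
 * Illustrative sketch (not part of the original file; `ffa_mem_frag_tx` is an
 * assumed wrapper for the FFA_MEM_FRAG_TX ABI): sending the constituents left
 * over after the first fragment of a share or lend, where `remaining` is the
 * value returned by `ffa_memory_region_init`.
 *
 *	uint32_t offset = constituent_count - remaining;
 *
 *	while (remaining > 0) {
 *		uint32_t fragment_length;
 *		uint32_t not_sent = ffa_memory_fragment_init(
 *			(struct ffa_memory_region_constituent *)tx_buffer,
 *			HF_MAILBOX_SIZE, &constituents[offset], remaining,
 *			&fragment_length);
 *
 *		ffa_mem_frag_tx(handle, fragment_length);
 *		offset += remaining - not_sent;
 *		remaining = not_sent;
 *	}
 */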

static void ffa_composite_memory_region_init(
	struct ffa_composite_memory_region *composite, uint64_t address,
	uint32_t page_count)
{
	composite->page_count = page_count;
	composite->constituent_count = 1;
	composite->reserved_0 = 0;

	composite->constituents[0].page_count = page_count;
	composite->constituents[0].address = address;
	composite->constituents[0].reserved = 0;
}

/**
 * Initialises the given `ffa_endpoint_rx_tx_descriptor` to be used for an
 * `FFA_RXTX_MAP` forwarding.
 * Each buffer is described by an `ffa_composite_memory_region` containing
 * one `ffa_memory_region_constituent`.
 */
void ffa_endpoint_rx_tx_descriptor_init(
	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
	uint64_t rx_address, uint64_t tx_address)
{
	desc->endpoint_id = endpoint_id;
	desc->reserved = 0;
	desc->pad = 0;

	/*
	 * RX's composite descriptor is allocated after the endpoint
	 * descriptor. `sizeof(struct ffa_endpoint_rx_tx_descriptor)` is
	 * guaranteed to be 16-byte aligned.
	 */
	desc->rx_offset = sizeof(struct ffa_endpoint_rx_tx_descriptor);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->rx_offset),
		rx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);

	/*
	 * TX's composite descriptor is allocated after the RX descriptor.
	 * `sizeof(struct ffa_composite_memory_region)` and
	 * `sizeof(struct ffa_memory_region_constituent)` are guaranteed to be
	 * 16-byte aligned in ffa_memory.c.
	 */
	desc->tx_offset = desc->rx_offset +
			  sizeof(struct ffa_composite_memory_region) +
			  sizeof(struct ffa_memory_region_constituent);

	ffa_composite_memory_region_init(
		(struct ffa_composite_memory_region *)((uintptr_t)desc +
						       desc->tx_offset),
		tx_address, HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
}
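
/*
 * Illustrative layout note (derived from the offsets computed above): the
 * resulting descriptor is contiguous in memory,
 *
 *	[struct ffa_endpoint_rx_tx_descriptor]
 *	[RX: composite + 1 constituent]  <- desc->rx_offset
 *	[TX: composite + 1 constituent]  <- desc->tx_offset
 *
 * with each mailbox described as HF_MAILBOX_SIZE / FFA_PAGE_SIZE pages.
 */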