/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#pragma once

#include "hf/mpool.h"
#include "hf/vm.h"

#include "vmapi/hf/ffa.h"

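/**
 * Checks that a received FF-A memory region descriptor is well formed for the
 * given FF-A version and fragment length. `send_transaction` indicates whether
 * the descriptor is being validated as part of a send operation rather than a
 * retrieve request.
 */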
bool ffa_memory_region_sanity_check(struct ffa_memory_region *memory_region,
                                    uint32_t ffa_version,
                                    uint32_t fragment_length,
                                    bool send_transaction);

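/**
 * Handles an FFA_MEM_DONATE, FFA_MEM_LEND or FFA_MEM_SHARE request from the
 * sending VM, as selected by `share_func`. `memory_share_length` is the total
 * size of the memory region descriptor, of which `fragment_length` bytes have
 * been received so far; `page_pool` provides memory for any bookkeeping
 * allocations.
 */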
struct ffa_value ffa_memory_send(struct vm_locked from_locked,
                                 struct ffa_memory_region *memory_region,
                                 uint32_t memory_share_length,
                                 uint32_t fragment_length, uint32_t share_func,
                                 struct mpool *page_pool);
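/**
 * Handles an FFA_MEM_FRAG_TX request, i.e. a further fragment of a memory send
 * transaction, identified by `handle`, that did not fit in a single message.
 */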
struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked,
                                          void *fragment,
                                          uint32_t fragment_length,
                                          ffa_memory_handle_t handle,
                                          struct mpool *page_pool);
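/**
 * Handles an FFA_MEM_RETRIEVE_REQ from a receiver, validating the retrieve
 * request against the stored share state and mapping the memory region for
 * the caller.
 */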
struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked,
                                     struct ffa_memory_region *retrieve_request,
                                     uint32_t retrieve_request_length,
                                     struct mpool *page_pool);
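/**
 * Handles an FFA_MEM_FRAG_RX request, returning the next fragment of the
 * retrieve response for the transaction identified by `handle` and
 * `sender_vm_id`, starting at `fragment_offset`.
 */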
struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
                                              ffa_memory_handle_t handle,
                                              uint32_t fragment_offset,
                                              ffa_id_t sender_vm_id,
                                              struct mpool *page_pool);
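/**
 * Handles an FFA_MEM_RELINQUISH request, unmapping the memory region described
 * by `relinquish_request` from the borrower's address space.
 */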
struct ffa_value ffa_memory_relinquish(
        struct vm_locked from_locked,
        struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool);
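/**
 * Handles an FFA_MEM_RECLAIM request, returning ownership and exclusive access
 * of the memory region identified by `handle` to the original sender once all
 * borrowers have relinquished it.
 */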
struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked,
                                    ffa_memory_handle_t handle,
                                    ffa_memory_region_flags_t flags,
                                    struct mpool *page_pool);