/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
| 16 | |
| 17 | #pragma once |
| 18 | |
| 19 | #include "hf/mpool.h" |
| 20 | #include "hf/vm.h" |
| 21 | |
| 22 | #include "vmapi/hf/ffa.h" |
| 23 | |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 24 | struct ffa_value ffa_memory_send(struct vm_locked from_locked, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 25 | struct ffa_memory_region *memory_region, |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 26 | uint32_t memory_share_length, |
Andrew Walbran | 1a86aa9 | 2020-05-15 17:22:28 +0100 | [diff] [blame] | 27 | uint32_t fragment_length, uint32_t share_func, |
| 28 | struct mpool *page_pool); |
| 29 | struct ffa_value ffa_memory_tee_send( |
| 30 | struct vm_locked from_locked, struct vm_locked to_locked, |
| 31 | struct ffa_memory_region *memory_region, uint32_t memory_share_length, |
| 32 | uint32_t fragment_length, uint32_t share_func, struct mpool *page_pool); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame^] | 33 | struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked, |
| 34 | void *fragment, |
| 35 | uint32_t fragment_length, |
| 36 | ffa_memory_handle_t handle, |
| 37 | struct mpool *page_pool); |
| 38 | struct ffa_value ffa_memory_tee_send_continue(struct vm_locked from_locked, |
| 39 | struct vm_locked to_locked, |
| 40 | void *fragment, |
| 41 | uint32_t fragment_length, |
| 42 | ffa_memory_handle_t handle, |
| 43 | struct mpool *page_pool); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 44 | struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked, |
| 45 | struct ffa_memory_region *retrieve_request, |
Andrew Walbran | 130a8ae | 2020-05-15 16:27:15 +0100 | [diff] [blame] | 46 | uint32_t retrieve_request_length, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 47 | struct mpool *page_pool); |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame^] | 48 | struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked, |
| 49 | ffa_memory_handle_t handle, |
| 50 | uint32_t fragment_offset, |
| 51 | struct mpool *page_pool); |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 52 | struct ffa_value ffa_memory_relinquish( |
| 53 | struct vm_locked from_locked, |
| 54 | struct ffa_mem_relinquish *relinquish_request, struct mpool *page_pool); |
| 55 | struct ffa_value ffa_memory_reclaim(struct vm_locked to_locked, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame^] | 56 | ffa_memory_handle_t handle, |
| 57 | ffa_memory_region_flags_t flags, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 58 | struct mpool *page_pool); |
| 59 | struct ffa_value ffa_memory_tee_reclaim(struct vm_locked to_locked, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame^] | 60 | struct vm_locked from_locked, |
Andrew Walbran | b5ab43c | 2020-04-30 11:32:54 +0100 | [diff] [blame] | 61 | ffa_memory_handle_t handle, |
Andrew Walbran | ca808b1 | 2020-05-15 17:22:28 +0100 | [diff] [blame^] | 62 | ffa_memory_region_flags_t flags, |
| 63 | struct mpool *page_pool); |