blob: ca7614313d2ca25ca8de40b815994190658ea420 [file] [log] [blame]
/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */
8
9#include "hf/arch/other_world.h"
10#include "hf/arch/plat/ffa/indirect_messaging.h"
11
12#include "hf/api.h"
13#include "hf/vcpu.h"
14
15bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
16 struct ffa_value *ret)
17{
18 /*
19 * VM's requests should be forwarded to the SPMC, if target is an SP.
20 */
21 if (!vm_id_is_current_world(vm_id)) {
22 *ret = arch_other_world_call_ext((struct ffa_value){
23 .func = FFA_RUN_32, ffa_vm_vcpu(vm_id, vcpu_idx)});
24 return true;
25 }
26
27 return false;
28}
29
30/**
31 * Check if current VM can resume target VM/SP using FFA_RUN ABI.
32 */
33bool plat_ffa_run_checks(struct vcpu_locked current_locked,
34 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
35 struct ffa_value *run_ret, struct vcpu **next)
36{
37 (void)next;
38 (void)vcpu_idx;
39
40 /* Only the primary VM can switch vCPUs. */
41 if (!vm_is_primary(current_locked.vcpu->vm)) {
42 run_ret->arg2 = FFA_DENIED;
43 return false;
44 }
45
46 /* Only secondary VM vCPUs can be run. */
47 if (target_vm_id == HF_PRIMARY_VM_ID) {
48 return false;
49 }
50
51 return true;
52}
53
54/**
55 * The invocation of FFA_MSG_WAIT at non-secure virtual FF-A instance is made
56 * to be compliant with version v1.0 of the FF-A specification. It serves as
57 * a blocking call.
58 */
59struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
60 struct vcpu **next)
61{
62 return plat_ffa_msg_recv(true, current_locked, next);
63}
64
65bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
66 ffa_id_t vm_id,
67 ffa_id_t receiver_vm_id,
68 struct vcpu_locked receiver_locked,
69 uint32_t func,
70 enum vcpu_state *next_state)
71{
72 (void)current_locked;
73 (void)vm_id;
74 (void)receiver_vm_id;
75 (void)receiver_locked;
76
77 switch (func) {
78 case FFA_YIELD_32:
79 /* Fall through. */
80 case FFA_MSG_SEND_DIRECT_REQ_64:
81 case FFA_MSG_SEND_DIRECT_REQ_32:
82 case FFA_MSG_SEND_DIRECT_REQ2_64:
83 case FFA_RUN_32:
84 *next_state = VCPU_STATE_BLOCKED;
85 return true;
86 case FFA_MSG_WAIT_32:
87 /* Fall through. */
88 case FFA_MSG_SEND_DIRECT_RESP_64:
89 case FFA_MSG_SEND_DIRECT_RESP_32:
90 case FFA_MSG_SEND_DIRECT_RESP2_64:
91 *next_state = VCPU_STATE_WAITING;
92 return true;
93 default:
94 return false;
95 }
96}
97
98void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
99 struct vcpu_locked target_locked)
100{
101 /* Scheduling mode not supported in the Hypervisor/VMs. */
102 (void)current_locked;
103 (void)target_locked;
104}
105
106/*
107 * Prepare to yield execution back to the VM that allocated cpu cycles and move
108 * to BLOCKED state.
109 */
110struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
111 struct vcpu **next,
112 uint32_t timeout_low,
113 uint32_t timeout_high)
114{
115 struct vcpu *current = current_locked.vcpu;
116 struct ffa_value ret = {
117 .func = FFA_YIELD_32,
118 .arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
119 .arg2 = timeout_low,
120 .arg3 = timeout_high,
121 };
122
123 /*
124 * Return execution to primary VM.
125 */
126 *next = api_switch_to_primary(current_locked, ret, VCPU_STATE_BLOCKED);
127
128 return (struct ffa_value){.func = FFA_SUCCESS_32};
129}