blob: 5ec1a459300de13be2e2ee6e66bc5fbb6552359e [file] [log] [blame]
Karl Meakin5a365d32024-11-08 23:55:03 +00001/*
2 * Copyright 2024 The Hafnium Authors.
3 *
4 * Use of this source code is governed by a BSD-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/BSD-3-Clause.
7 */
8
9#include "hf/arch/other_world.h"
Karl Meakin5a365d32024-11-08 23:55:03 +000010
11#include "hf/api.h"
Karl Meakin902af082024-11-28 14:58:38 +000012#include "hf/ffa/indirect_messaging.h"
Madhukar Pappireddy04cf9e82025-03-17 17:32:02 -050013#include "hf/ffa/vm.h"
Karl Meakin936ec1e2025-01-31 13:17:11 +000014#include "hf/ffa_internal.h"
Karl Meakin5a365d32024-11-08 23:55:03 +000015#include "hf/vcpu.h"
16
Karl Meakin117c8082024-12-04 16:03:28 +000017bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
18 struct ffa_value *ret)
Karl Meakin5a365d32024-11-08 23:55:03 +000019{
20 /*
21 * VM's requests should be forwarded to the SPMC, if target is an SP.
22 */
23 if (!vm_id_is_current_world(vm_id)) {
24 *ret = arch_other_world_call_ext((struct ffa_value){
25 .func = FFA_RUN_32, ffa_vm_vcpu(vm_id, vcpu_idx)});
26 return true;
27 }
28
29 return false;
30}
31
32/**
33 * Check if current VM can resume target VM/SP using FFA_RUN ABI.
34 */
Karl Meakin117c8082024-12-04 16:03:28 +000035bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
36 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
37 struct ffa_value *run_ret, struct vcpu **next)
Karl Meakin5a365d32024-11-08 23:55:03 +000038{
39 (void)next;
40 (void)vcpu_idx;
41
42 /* Only the primary VM can switch vCPUs. */
43 if (!vm_is_primary(current_locked.vcpu->vm)) {
44 run_ret->arg2 = FFA_DENIED;
45 return false;
46 }
47
48 /* Only secondary VM vCPUs can be run. */
49 if (target_vm_id == HF_PRIMARY_VM_ID) {
50 return false;
51 }
52
53 return true;
54}
55
56/**
57 * The invocation of FFA_MSG_WAIT at non-secure virtual FF-A instance is made
58 * to be compliant with version v1.0 of the FF-A specification. It serves as
59 * a blocking call.
60 */
Karl Meakin117c8082024-12-04 16:03:28 +000061struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
62 struct vcpu_locked current_locked, struct vcpu **next)
Karl Meakin5a365d32024-11-08 23:55:03 +000063{
Karl Meakin117c8082024-12-04 16:03:28 +000064 return ffa_indirect_msg_recv(true, current_locked, next);
Karl Meakin5a365d32024-11-08 23:55:03 +000065}
66
Karl Meakin117c8082024-12-04 16:03:28 +000067bool ffa_cpu_cycles_check_runtime_state_transition(
68 struct vcpu_locked current_locked, ffa_id_t vm_id,
69 ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
70 uint32_t func, enum vcpu_state *next_state)
Karl Meakin5a365d32024-11-08 23:55:03 +000071{
72 (void)current_locked;
73 (void)vm_id;
74 (void)receiver_vm_id;
75 (void)receiver_locked;
76
77 switch (func) {
78 case FFA_YIELD_32:
Karl Meakin402b1fe2025-03-20 14:52:55 +000079 [[fallthrough]];
Karl Meakin5a365d32024-11-08 23:55:03 +000080 case FFA_MSG_SEND_DIRECT_REQ_64:
81 case FFA_MSG_SEND_DIRECT_REQ_32:
82 case FFA_MSG_SEND_DIRECT_REQ2_64:
83 case FFA_RUN_32:
84 *next_state = VCPU_STATE_BLOCKED;
85 return true;
86 case FFA_MSG_WAIT_32:
Karl Meakin402b1fe2025-03-20 14:52:55 +000087 [[fallthrough]];
Karl Meakin5a365d32024-11-08 23:55:03 +000088 case FFA_MSG_SEND_DIRECT_RESP_64:
89 case FFA_MSG_SEND_DIRECT_RESP_32:
90 case FFA_MSG_SEND_DIRECT_RESP2_64:
91 *next_state = VCPU_STATE_WAITING;
92 return true;
93 default:
94 return false;
95 }
96}
97
/**
 * Hook invoked when cycles are allocated through FFA_RUN to record the
 * scheduling mode. The Hypervisor/VMs do not track scheduling modes
 * (an SPMC concept), so this implementation is deliberately a no-op.
 */
void ffa_cpu_cycles_init_schedule_mode_ffa_run(
	struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
	/* Scheduling mode not supported in the Hypervisor/VMs. */
	(void)current_locked;
	(void)target_locked;
}
105
106/*
107 * Prepare to yield execution back to the VM that allocated cpu cycles and move
108 * to BLOCKED state.
109 */
Karl Meakin117c8082024-12-04 16:03:28 +0000110struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
111 struct vcpu **next,
112 uint32_t timeout_low,
113 uint32_t timeout_high)
Karl Meakin5a365d32024-11-08 23:55:03 +0000114{
115 struct vcpu *current = current_locked.vcpu;
116 struct ffa_value ret = {
117 .func = FFA_YIELD_32,
118 .arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
119 .arg2 = timeout_low,
120 .arg3 = timeout_high,
121 };
122
123 /*
124 * Return execution to primary VM.
125 */
126 *next = api_switch_to_primary(current_locked, ret, VCPU_STATE_BLOCKED);
127
128 return (struct ffa_value){.func = FFA_SUCCESS_32};
129}
Karl Meakin936ec1e2025-01-31 13:17:11 +0000130
/**
 * Handler for the FFA_ERROR ABI at the non-secure virtual FF-A instance.
 * The hypervisor does not implement this interface, so all arguments are
 * ignored and FFA_ERROR(FFA_NOT_SUPPORTED) is returned to the caller.
 */
struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
					 struct vcpu **next,
					 enum ffa_error error_code)
{
	(void)current;
	(void)next;
	(void)error_code;
	/* TODO: Interface not handled in hypervisor. */
	return ffa_error(FFA_NOT_SUPPORTED);
}
Madhukar Pappireddy04cf9e82025-03-17 17:32:02 -0500141
142struct ffa_value ffa_cpu_cycles_abort(struct vcpu_locked *current_locked,
143 struct vcpu **next)
144{
145 struct ffa_value to_ret = ffa_error(FFA_ABORTED);
146
147 *next = api_switch_to_primary(*current_locked, to_ret,
148 VCPU_STATE_ABORTED);
149
150 return (struct ffa_value){.func = FFA_SUCCESS_32};
151}