Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2018 Google LLC |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * https://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
extern "C" {
#include "vmapi/hf/abi.h"
}

#include <cstring>

#include <gmock/gmock.h>
| 22 | |
| 23 | namespace |
| 24 | { |
| 25 | using ::testing::Eq; |
| 26 | |
| 27 | /** |
| 28 | * Simulate an uninitialized hf_vcpu_run_return so it can be detected if any |
| 29 | * uninitialized fields make their way into the encoded form which would |
| 30 | * indicate a data leak. |
| 31 | */ |
| 32 | struct hf_vcpu_run_return dirty_vcpu_run_return() |
| 33 | { |
| 34 | struct hf_vcpu_run_return res; |
| 35 | memset(&res, 0xc5, sizeof(res)); |
| 36 | return res; |
| 37 | } |
| 38 | |
| 39 | /** |
| 40 | * Simulate an uninitialized hf_mailbox_receive_return so it can be detected if |
| 41 | * any uninitialized fields make their way into the encoded form which would |
| 42 | * indicate a data leak. |
| 43 | */ |
| 44 | struct hf_mailbox_receive_return dirty_mailbox_receive_return() |
| 45 | { |
| 46 | struct hf_mailbox_receive_return res; |
| 47 | memset(&res, 0xc5, sizeof(res)); |
| 48 | return res; |
| 49 | } |
| 50 | |
| 51 | /** |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 52 | * Encode a preempted response without leaking. |
| 53 | */ |
| 54 | TEST(abi, hf_vcpu_run_return_encode_preempted) |
| 55 | { |
| 56 | struct hf_vcpu_run_return res = dirty_vcpu_run_return(); |
| 57 | res.code = HF_VCPU_RUN_PREEMPTED; |
| 58 | EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0)); |
| 59 | } |
| 60 | |
| 61 | /** |
| 62 | * Decode a preempted response ignoring the irrelevant bits. |
| 63 | */ |
| 64 | TEST(abi, hf_vcpu_run_return_decode_preempted) |
| 65 | { |
| 66 | struct hf_vcpu_run_return res = |
| 67 | hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b00); |
| 68 | EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_PREEMPTED)); |
| 69 | } |
| 70 | |
| 71 | /** |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 72 | * Encode a yield response without leaking. |
| 73 | */ |
| 74 | TEST(abi, hf_vcpu_run_return_encode_yield) |
| 75 | { |
| 76 | struct hf_vcpu_run_return res = dirty_vcpu_run_return(); |
| 77 | res.code = HF_VCPU_RUN_YIELD; |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 78 | EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(1)); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 79 | } |
| 80 | |
| 81 | /** |
| 82 | * Decode a yield response ignoring the irrelevant bits. |
| 83 | */ |
| 84 | TEST(abi, hf_vcpu_run_return_decode_yield) |
| 85 | { |
| 86 | struct hf_vcpu_run_return res = |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 87 | hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b01); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 88 | EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_YIELD)); |
| 89 | } |
| 90 | |
| 91 | /** |
| 92 | * Encode wait-for-interrupt response without leaking. |
| 93 | */ |
| 94 | TEST(abi, hf_vcpu_run_return_encode_wait_for_interrupt) |
| 95 | { |
| 96 | struct hf_vcpu_run_return res = dirty_vcpu_run_return(); |
| 97 | res.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT; |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 98 | EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(2)); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 99 | } |
| 100 | |
| 101 | /** |
| 102 | * Decode a wait-for-interrupt response ignoring the irrelevant bits. |
| 103 | */ |
| 104 | TEST(abi, hf_vcpu_run_return_decode_wait_for_interrupt) |
| 105 | { |
| 106 | struct hf_vcpu_run_return res = |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 107 | hf_vcpu_run_return_decode(0x1234abcdbadb0102); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 108 | EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAIT_FOR_INTERRUPT)); |
| 109 | } |
| 110 | |
| 111 | /** |
| 112 | * Encode wake up response without leaking. |
| 113 | */ |
| 114 | TEST(abi, hf_vcpu_run_return_encode_wake_up) |
| 115 | { |
| 116 | struct hf_vcpu_run_return res = dirty_vcpu_run_return(); |
| 117 | res.code = HF_VCPU_RUN_WAKE_UP; |
| 118 | res.wake_up.vm_id = 0x12345678; |
| 119 | res.wake_up.vcpu = 0xabcd; |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 120 | EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x12345678abcd0003)); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 121 | } |
| 122 | |
| 123 | /** |
| 124 | * Decode a wake up response ignoring the irrelevant bits. |
| 125 | */ |
| 126 | TEST(abi, hf_vcpu_run_return_decode_wake_up) |
| 127 | { |
| 128 | struct hf_vcpu_run_return res = |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 129 | hf_vcpu_run_return_decode(0xbeefd00df00daf03); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 130 | EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_WAKE_UP)); |
| 131 | EXPECT_THAT(res.wake_up.vm_id, Eq(0xbeefd00d)); |
| 132 | EXPECT_THAT(res.wake_up.vcpu, Eq(0xf00d)); |
| 133 | } |
| 134 | |
| 135 | /** |
| 136 | * Encode message response without leaking. |
| 137 | */ |
| 138 | TEST(abi, hf_vcpu_run_return_encode_message) |
| 139 | { |
| 140 | struct hf_vcpu_run_return res = dirty_vcpu_run_return(); |
| 141 | res.code = HF_VCPU_RUN_MESSAGE; |
| 142 | res.message.size = 0xdeadbeef; |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 143 | EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xdeadbeef00000004)); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 144 | } |
| 145 | |
| 146 | /** |
| 147 | * Decode a wake up response ignoring the irrelevant bits. |
| 148 | */ |
| 149 | TEST(abi, hf_vcpu_run_return_decode_message) |
| 150 | { |
| 151 | struct hf_vcpu_run_return res = |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 152 | hf_vcpu_run_return_decode(0x1123581314916204); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 153 | EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_MESSAGE)); |
| 154 | EXPECT_THAT(res.message.size, Eq(0x11235813)); |
| 155 | } |
| 156 | |
| 157 | /** |
| 158 | * Encode sleep response without leaking. |
| 159 | */ |
| 160 | TEST(abi, hf_vcpu_run_return_encode_sleep) |
| 161 | { |
| 162 | struct hf_vcpu_run_return res = dirty_vcpu_run_return(); |
| 163 | res.code = HF_VCPU_RUN_SLEEP; |
| 164 | res.sleep.ns = 0xcafed00dfeeded; |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 165 | EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0xcafed00dfeeded05)); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 166 | } |
| 167 | |
| 168 | /** |
| 169 | * Encoding a sleep response with too large a sleep duration will drop the top |
| 170 | * octet. |
| 171 | */ |
| 172 | TEST(abi, hf_vcpu_run_return_encode_sleep_too_long) |
| 173 | { |
| 174 | struct hf_vcpu_run_return res = dirty_vcpu_run_return(); |
| 175 | res.code = HF_VCPU_RUN_SLEEP; |
| 176 | res.sleep.ns = 0xcc88888888888888; |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 177 | EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(0x8888888888888805)); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 178 | } |
| 179 | |
| 180 | /** |
| 181 | * Decode a sleep response. |
| 182 | */ |
| 183 | TEST(abi, hf_vcpu_run_return_decode_sleep) |
| 184 | { |
| 185 | struct hf_vcpu_run_return res = |
Andrew Scull | 33fecd3 | 2019-01-08 14:48:27 +0000 | [diff] [blame^] | 186 | hf_vcpu_run_return_decode(0x1a2b3c4d5e6f7705); |
Andrew Scull | 6d2db33 | 2018-10-10 15:28:17 +0100 | [diff] [blame] | 187 | EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_SLEEP)); |
| 188 | EXPECT_THAT(res.sleep.ns, Eq(0x1a2b3c4d5e6f77)); |
| 189 | } |
| 190 | |
| 191 | /** |
| 192 | * Encode a mailbox receive response without leaking. |
| 193 | */ |
| 194 | TEST(abi, hf_mailbox_receive_return_encode) |
| 195 | { |
| 196 | struct hf_mailbox_receive_return res = dirty_mailbox_receive_return(); |
| 197 | res.vm_id = 0x12345678; |
| 198 | res.size = 0xaabbccdd; |
| 199 | EXPECT_THAT(hf_mailbox_receive_return_encode(res), |
| 200 | Eq(0xaabbccdd12345678)); |
| 201 | } |
| 202 | |
| 203 | /** |
| 204 | * Decode a mailbox receive response. |
| 205 | */ |
| 206 | TEST(abi, hf_mailbox_receive_return_decode) |
| 207 | { |
| 208 | struct hf_mailbox_receive_return res = |
| 209 | hf_mailbox_receive_return_decode(0X8badf00d00ddba11); |
| 210 | EXPECT_THAT(res.vm_id, Eq(0X00ddba11)); |
| 211 | EXPECT_THAT(res.size, Eq(0x8badf00d)); |
| 212 | } |
| 213 | |
| 214 | } /* namespace */ |