#!/bin/bash
#
# Copyright (c) 2019-2020, Arm Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#

set -e

# Enable job control to have background processes run in their own process
# group. That way, we can kill a background process group in one go.
set -m

ci_root="$(readlink -f "$(dirname "$0")/..")"
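# utils.sh supplies shared helpers; the upon, not_upon, die and mktempfile
# calls below are assumed to come from there.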
source "$ci_root/utils.sh"

artefacts="${artefacts-$workspace/artefacts}"

run_root="$workspace/run"
pid_dir="$workspace/pids"

mkdir -p "$pid_dir"
mkdir -p "$run_root"

kill_and_reap() {
        local gid
        # Kill an active process. Ignore errors
        [ "$1" ] || return 0
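        # kill -0 sends no signal; it only checks that the process still exists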
        kill -0 "$1" &>/dev/null || return 0

        # Kill the children
        kill -- "-$1" &>/dev/null || true
        # Kill the group
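        # Field 5 of /proc/<pid>/stat is the process group ID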
        { gid="$(awk '{print $5}' < "/proc/$1/stat")";} 2>/dev/null || return
        # The Code Coverage plugin needs the kill signal propagated to it so
        # that it can save the trace statistics.
        if [ "${COVERAGE_ON}" == "1" ] || [ -n "$cc_enable" ]; then
                kill -SIGTERM -- "-$gid" &>/dev/null || true
        else
                kill -SIGKILL -- "-$gid" &>/dev/null || true
        fi
        wait "$gid" &>/dev/null || true
}

# Perform clean up and ignore errors
cleanup() {
        local pid

        # Kill all background processes spawned so far and wait for them
        pushd "$pid_dir"
        set +e
        while read pid; do
                pid="$(cat "$pid")"
                # Forcefully killing the model process does not print its
                # statistics (host CPU time spent in User and System). Kill
                # the model safely with SIGINT (^C), which lets it print them.
                if [ "$pid" == "$model_pid" ]; then
                        model_cid=$(pgrep -P "$model_pid" | xargs)
                        # ignore errors
                        kill -SIGINT "$model_cid" &>/dev/null || true
                        # Allow some time to print data
                        sleep 2
                else
                        kill_and_reap "$pid"
                fi
        done < <(find -name '*.pid')
        popd
}

# Launch a program. Save its PID in a file named after the given name with a
# .pid suffix. When the program exits, create a file with a .success suffix,
# or one with a .fail suffix if it fails. This function blocks, so callers
# must '&' it if they want to continue. Callers must also wait for
# $pid_dir/$name.pid to be created before reading it.
launch() {
        local pid

        "$@" &
        pid="$!"
        echo "$pid" > "$pid_dir/${name:?}.pid"
        if wait "$pid"; then
                touch "$pid_dir/$name.success"
        else
                touch "$pid_dir/$name.fail"
        fi
}

# Cleanup actions
trap cleanup SIGINT SIGHUP SIGTERM EXIT

# Prevent xterm windows for untracked terminals from popping up, especially
# when running locally
not_upon "$test_run" && export DISPLAY=

# Source variables required for the run
source "$artefacts/env"

echo
echo "RUNNING: $TEST_CONFIG"
echo

# Accept BIN_MODE from the environment, or default to release. If bin_mode is
# set and non-empty (intended to be set from the command line), that takes
# precedence.
pkg_bin_mode="${BIN_MODE:-release}"
bin_mode="${bin_mode:-$pkg_bin_mode}"

# Assume 0 is the primary UART to track
primary_uart=0

# Assume 4 UARTs by default
num_uarts="${num_uarts:-4}"

# Whether to display primary UART progress live on the console
primary_live="${primary_live-$PRIMARY_LIVE}"

# Change directory so that all binaries can be accessed relative to where they
# lie
run_cwd="$artefacts/$bin_mode"
cd "$run_cwd"

# Source environment for run
if [ -f "run/env" ]; then
        source "run/env"
fi

# Fail if there was no model path set
if [ -z "$model_path" ]; then
        die "No model path set by package!"
fi

# Launch model with parameters
model_out="$run_root/model_log.txt"
run_sh="$run_root/run.sh"

# Generate run.sh
echo "$model_path \\" > "$run_sh"
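# model_params lists one model argument per line: drop blank lines, sort, then
# indent each line and append ' \' so that run.sh stays a single command line.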
sed '/^\s*$/d' < model_params | sort | sed 's/^/\t/;s/$/ \\/' >> "$run_sh"

if [ "${COVERAGE_ON}" == "1" ]; then
        # Add the code coverage plugin
        echo -e "\t-C TRACE.CoverageTrace.trace-file-prefix=$trace_file_prefix \\" >> "$run_sh"
        echo -e "\t--plugin $coverage_trace_plugin \\" >> "$run_sh"
fi
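# Forward any arguments given to run.sh on to the model command line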
echo -e "\t\"\$@\"" >> "$run_sh"

# Running reboot/shutdown tests requires storing state in non-volatile memory
# (NVM) across reboots. On FVP, NVM is not persistent across reboots, so NVM
# is saved to a file ($NVM_file) when the model is run via the run.sh script.
# If TFTF reboot/shutdown tests are enabled, run the FVP model 10 times,
# feeding it the file containing the NVM state generated by the previous run.
# Note that this file also includes the FIP image.

if upon "$run_tftf_reboot_tests" = "1"; then
        tftf_reboot_tests="$run_root/tftf_reboot_tests.sh"

        # Generate the tftf_reboot_tests script. It is similar to run.sh, but
        # runs the model 10 times for the reboot and shutdown tests. The UART
        # log files generated by the FVP model get overwritten across reboots,
        # so copy their contents at the end of each run.
        echo "cat $uart0_file >> UART0.log" >>"$tftf_reboot_tests"
        echo "cat $uart1_file >> UART1.log" >>"$tftf_reboot_tests"
        cat <<EOF >>"$tftf_reboot_tests"

for i in {1..10}
do
EOF
        cat "$run_sh" >> "$tftf_reboot_tests"
        echo "cat $uart0_file >> UART0.log" >>"$tftf_reboot_tests"
        echo "cat $uart1_file >> UART1.log" >>"$tftf_reboot_tests"
        cat <<EOF >>"$tftf_reboot_tests"
done
EOF
        # Replace fip.bin with the file $NVM_file
        sed -i 's/fip.bin/'"$NVM_file"'/' "$tftf_reboot_tests"

        echo "TFTF Reboot/Shutdown Tests Enabled"
        cat "$tftf_reboot_tests" >> "$run_sh"
        rm "$tftf_reboot_tests"
fi

echo "Model command line:"
echo
cat "$run_sh"
chmod +x "$run_sh"
echo

# If it's a test run, skip all the hoops and launch the model directly.
if upon "$test_run"; then
        "$run_sh" "$@"
        exit 0
fi

# For an automated run, export a known variable so that stale processes
# spawned by Trusted Firmware CI can be identified by inspecting their
# environment.
export TRUSTED_FIRMWARE_CI="1"

# Launch the model with no buffering on stdout; all artefact paths are
# relative to the current directory.
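# Truncate the model log before starting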
: >"$model_out"
name="model" launch stdbuf -o0 -e0 "$run_sh" &>"$model_out" &
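# Wait (up to roughly 10 seconds) for the launch wrapper to write the model
# PID file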
wait_count=0
while :; do
        if [ -f "$pid_dir/model.pid" ]; then
                break
        fi
        sleep 0.1

        let "wait_count += 1"
        if [ "$wait_count" -gt 100 ]; then
                die "Failed to launch model!"
        fi
done

model_pid="$(cat "$pid_dir/model.pid")"
ports_output="$(mktempfile)"
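# If the package did not provide its own ports script, fall back to a default
# awk script that takes the last field of the model's "terminal_N" lines as
# the TCP port for that UART.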
if not_upon "$ports_script"; then
        # Default AWK script to parse model ports
        ports_script="$(mktempfile)"
        cat <<'EOF' >"$ports_script"
/terminal_0/ { ports[0] = $NF }
/terminal_1/ { ports[1] = $NF }
/terminal_2/ { ports[2] = $NF }
/terminal_3/ { ports[3] = $NF }
END {
        for (i = 0; i < num_uarts; i++) {
                if (ports[i] != "")
                        print "ports[" i "]=" ports[i]
        }
}
EOF
fi

# Start a watchdog to kill ourselves if we wait too long for the model
# response. Note that this is not the timeout for the whole test, but only for
# the model to output its port numbers.
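# Note: 'kill "$$"' below sends SIGTERM to this script, whose trap runs cleanup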
| 240 | ( |
| 241 | if upon "$jenkins_run"; then |
| 242 | # Increase this timeout for a cluster run, as it could take longer if |
| 243 | # the load on the Jenkins server is high. |
| 244 | model_wait_timeout=120 |
| 245 | else |
| 246 | model_wait_timeout=30 |
| 247 | fi |
| 248 | sleep $model_wait_timeout |
| 249 | echo "Model wait timeout!" |
| 250 | kill "$$" |
| 251 | ) & |
| 252 | watchdog="$!" |
| 253 | |
| 254 | # Parse UARTs ports from early model output. Send a SIGSTOP to the model |
| 255 | # as soon as it outputs all UART ports. This is to prevent the model |
| 256 | # executing before the expect scripts get a chance to connect to the |
| 257 | # UART thereby losing messages. |
| 258 | model_fail=1 |
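# Busy-poll the model log until all $num_uarts port numbers have appeared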
while :; do
        awk -v "num_uarts=$num_uarts" -f "$ports_script" "$model_out" \
                > "$ports_output"
        if [ $(wc -l < "$ports_output") -eq "$num_uarts" ]; then
                kill -SIGSTOP "$model_pid"
                model_fail=0
                break
        fi

        # Bail out if the model exited meanwhile
        if ! kill -0 "$model_pid" &>/dev/null; then
                echo "Model terminated unexpectedly!"
                break
        fi
done

# Kill the watchdog
kill_and_reap "$watchdog" || true

# Bail out if the model failed in the meantime, for some reason
if [ "$model_fail" -ne 0 ]; then
        exit 1
fi

if ! [ -x "$(command -v expect)" ]; then
        echo "Error: Expect is not installed."
        exit 1
fi

# The wait loop above exited after the model port numbers were parsed. The
# script's output is ready to be sourced now.
declare -a ports
source "$ports_output"
rm -f "$ports_output"
if [ "${#ports[@]}" -ne "$num_uarts" ]; then
        echo "Failed to get UART port numbers"
        kill_and_reap "$model_pid"
        unset model_pid
fi

# Launch expect scripts for all UARTs
uarts=0
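# Iterate in reverse order so that the primary UART (0) is launched last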
for u in $(seq 0 $(( $num_uarts - 1 )) | tac); do
        script="run/uart$u/expect"
        if [ -f "$script" ]; then
                script="$(cat "$script")"
        else
                script=
        fi

        # Primary UART must have a script
        if [ -z "$script" ]; then
                if [ "$u" = "$primary_uart" ]; then
                        die "No primary UART script!"
                else
                        echo "Ignoring UART$u (no expect script provided)."
                        continue
                fi
        fi

        timeout="run/uart$u/timeout"
        if [ -f "$timeout" ]; then
                timeout="$(cat "$timeout")"
        else
                timeout=
        fi
        timeout="${timeout:-600}"

        full_log="$run_root/uart${u}_full.txt"

        if [ "$u" = "$primary_uart" ]; then
                star="*"
                uart_name="primary_uart"
        else
                star=" "
                uart_name="uart$u"
        fi

        # Launch expect after exporting required variables
        (
        if [ -f "run/uart$u/env" ]; then
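                # set -a auto-exports every variable assigned while sourcing
                # the per-UART env file, so the expect child inherits them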
                set -a
                source "run/uart$u/env"
                set +a
        fi

        if [ "$u" = "$primary_uart" ] && upon "$primary_live"; then
                uart_port="${ports[$u]}" timeout="$timeout" \
                        name="$uart_name" launch expect -f "$ci_root/expect/$script" | \
                        tee "$full_log"
                echo
        else
                uart_port="${ports[$u]}" timeout="$timeout" \
                        name="$uart_name" launch expect -f "$ci_root/expect/$script" \
                        &>"$full_log"
        fi

        ) &

        let "uarts += 1"
        echo "Tracking UART$u$star with $script; timeout $timeout."
done
# Wait here long enough for the expect scripts to connect to their ports; then
# let the model proceed
sleep 2
kill -SIGCONT "$model_pid"

# Wait for all children. Note that the wait below is *not* a timed wait.
result=0

set +e
pushd "$pid_dir"
while :; do
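        # wait -n (bash 4.3+) returns as soon as any one background job
        # finishes; then check the outcome files written by launch()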
        wait -n

        # Exit with failure if any child has failed
        if [ "$(wc -l < <(find -name '*.fail'))" -ne 0 ]; then
                result=1
                break
        fi

        # We're done if the primary UART exits successfully
        if [ -f "$pid_dir/primary_uart.success" ]; then
                break
        fi
done
popd

cleanup

if [ "$result" -eq 0 ]; then
        echo "Test success!"
else
        echo "Test failed!"
fi

if upon "$jenkins_run"; then
        echo
        echo "Artefacts location: $BUILD_URL."
        echo
fi

if upon "$jenkins_run" && upon "$artefacts_receiver" && [ -d "$workspace/run" ]; then
        source "$CI_ROOT/script/send_artefacts.sh" "run"
fi

exit "$result"
# vim: set tw=80 sw=8 noet: