#line 2 "suites/host_test.function"

/**
 * \brief       Verifies that the string is in the string parameter format,
 *              i.e. "<str>" (enclosed in double quotes). It also strips the
 *              enclosing '"' from the input string, in place.
 *
 * \param str   String parameter.
 *
 * \return      0 if success, -1 otherwise.
 */
int verify_string(char **str)
{
    if ((*str)[0] != '"' ||
        (*str)[strlen(*str) - 1] != '"') {
        mbedtls_fprintf(stderr,
                        "Expected string (with \"\") for parameter and got: %s\n", *str);
        return -1;
    }

    (*str)++;
    (*str)[strlen(*str) - 1] = '\0';

    return 0;
}
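
/*
 * Illustrative usage sketch (not part of the generated suite; the buffer
 * contents below are an assumption for illustration only). verify_string()
 * strips the surrounding quotes in place and advances the caller's pointer:
 *
 *     char buf[] = "\"hello\"";
 *     char *p = buf;
 *     if (verify_string(&p) == 0) {
 *         // p now points to "hello": the leading '"' is skipped and the
 *         // trailing '"' is overwritten with '\0'.
 *     }
 */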

/**
 * \brief       Verifies that the string is an integer and gives the
 *              converted integer value.
 *
 * \param str       Input string.
 * \param p_value   Pointer to the output value.
 *
 * \return      0 if success, KEY_VALUE_MAPPING_NOT_FOUND otherwise.
 */
int verify_int(char *str, intmax_t *p_value)
{
    char *end = NULL;
    errno = 0;
    /* Limit the range to long: for large integers, the test framework will
     * use expressions anyway. */
    long value = strtol(str, &end, 0);
    if (errno == EINVAL || *end != '\0') {
        mbedtls_fprintf(stderr,
                        "Expected integer for parameter and got: %s\n", str);
        return KEY_VALUE_MAPPING_NOT_FOUND;
    }
    if (errno == ERANGE) {
        mbedtls_fprintf(stderr, "Integer out of range: %s\n", str);
        return KEY_VALUE_MAPPING_NOT_FOUND;
    }
    *p_value = value;
    return 0;
}
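
/*
 * Illustrative usage sketch (the input strings are assumptions for
 * illustration only). Because strtol() is called with base 0, decimal,
 * octal and hexadecimal notation are all accepted:
 *
 *     intmax_t v;
 *     char dec[] = "255", hex[] = "0xff";
 *     verify_int(dec, &v);   // v == 255
 *     verify_int(hex, &v);   // v == 255 as well
 */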


/**
 * \brief       Usage string.
 *
 */
#define USAGE \
    "Usage: %s [OPTIONS] files...\n\n" \
    "   Command line arguments:\n" \
    "     files...          One or more test data files. If no file is\n" \
    "                       specified the following default test case\n" \
    "                       file is used:\n" \
    "                           %s\n\n" \
    "   Options:\n" \
    "     -v | --verbose    Display full information about each test\n" \
    "     -h | --help       Display this information\n\n", \
    argv[0], \
    "TESTCASE_FILENAME"


/**
 * \brief       Read a line from the passed file pointer.
 *
 * \param f     FILE pointer.
 * \param buf   Pointer to memory to hold the read line.
 * \param len   Length of buf.
 *
 * \return      0 if success, -1 otherwise.
 */
int get_line(FILE *f, char *buf, size_t len)
{
    char *ret;
    int i = 0, str_len = 0, has_string = 0;

    /* Read until we get a valid line */
    do {
        ret = fgets(buf, len, f);
        if (ret == NULL) {
            return -1;
        }

        str_len = strlen(buf);

        /* Skip empty line and comment */
        if (str_len == 0 || buf[0] == '#') {
            continue;
        }
        has_string = 0;
        for (i = 0; i < str_len; i++) {
            char c = buf[i];
            if (c != ' ' && c != '\t' && c != '\n' &&
                c != '\v' && c != '\f' && c != '\r') {
                has_string = 1;
                break;
            }
        }
    } while (!has_string);

    /* Strip new line and carriage return */
    ret = buf + strlen(buf);
    if (ret-- > buf && *ret == '\n') {
        *ret = '\0';
    }
    if (ret-- > buf && *ret == '\r') {
        *ret = '\0';
    }

    return 0;
}
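
/*
 * Illustrative usage sketch (the file name is an assumption for illustration
 * only). get_line() skips blank lines and '#' comments and strips the
 * trailing newline/carriage return, so each returned line can be handed
 * straight to parse_arguments():
 *
 *     char line[5000];
 *     FILE *f = fopen("test_suite_example.datax", "r");
 *     if (f != NULL) {
 *         while (get_line(f, line, sizeof(line)) == 0) {
 *             // process one non-empty, non-comment line
 *         }
 *         fclose(f);
 *     }
 */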

/**
 * \brief       Splits a string on ':' delimiters, in place. An escaped
 *              delimiter ('\:') is not treated as a separator.
 *
 * \param buf           Input string.
 * \param len           Input string length.
 * \param params        Output array of pointers to the parameters found.
 * \param params_len    Length of the params array.
 *
 * \return      Count of strings found.
 */
static int parse_arguments(char *buf, size_t len, char **params,
                           size_t params_len)
{
    size_t cnt = 0, i;
    char *cur = buf;
    char *p = buf, *q;

    params[cnt++] = cur;

    while (*p != '\0' && p < (buf + len)) {
        if (*p == '\\') {
            p++;
            p++;
            continue;
        }
        if (*p == ':') {
            if (p + 1 < buf + len) {
                cur = p + 1;
                TEST_HELPER_ASSERT(cnt < params_len);
                params[cnt++] = cur;
            }
            *p = '\0';
        }

        p++;
    }

    /* Replace backslash escapes in strings */
    for (i = 0; i < cnt; i++) {
        p = params[i];
        q = params[i];

        while (*p != '\0') {
            if (*p == '\\') {
                ++p;
                switch (*p) {
                    case 'n':
                        *p = '\n';
                        break;
                    default:
                        // Fall through to copying *p
                        break;
                }
            }
            *(q++) = *(p++);
        }
        *q = '\0';
    }

    return cnt;
}
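
/*
 * Illustrative usage sketch (the line contents are an assumption for
 * illustration only). The buffer is cut at each unescaped ':' and the
 * escape sequences are resolved afterwards:
 *
 *     char line[] = "my_function:\"a\\:b\":123";
 *     char *params[8];
 *     int cnt = parse_arguments(line, strlen(line), params,
 *                               sizeof(params) / sizeof(params[0]));
 *     // cnt == 3; params[0] == "my_function", params[1] == "\"a:b\"",
 *     // params[2] == "123"
 */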

/**
 * \brief       Converts parameters into test function consumable parameters.
 *              Example: Input:  {"int", "0", "char*", "Hello",
 *                                "hex", "abef", "exp", "1"}
 *                      Output:  {
 *                                0,                // Verified int
 *                                "Hello",          // Verified string
 *                                2, { 0xab, 0xef },// Converted len,hex pair
 *                                9600              // Evaluated expression
 *                               }
 *
 * \param cnt               Parameter array count.
 * \param params            In/out array of parameters, converted in place.
 * \param int_params_store  Memory for storing processed integer parameters.
 *
 * \return      DISPATCH_TEST_SUCCESS for success,
 *              DISPATCH_INVALID_TEST_DATA otherwise.
 */
static int convert_params(size_t cnt, char **params,
                          mbedtls_test_argument_t *int_params_store)
{
    char **cur = params;
    char **out = params;
    int ret = DISPATCH_TEST_SUCCESS;

    while (cur < params + cnt) {
        char *type = *cur++;
        char *val = *cur++;

        if (strcmp(type, "char*") == 0) {
            if (verify_string(&val) == 0) {
                *out++ = val;
            } else {
                ret = (DISPATCH_INVALID_TEST_DATA);
                break;
            }
        } else if (strcmp(type, "int") == 0) {
            if (verify_int(val, &int_params_store->sint) == 0) {
                *out++ = (char *) int_params_store++;
            } else {
                ret = (DISPATCH_INVALID_TEST_DATA);
                break;
            }
        } else if (strcmp(type, "hex") == 0) {
            if (verify_string(&val) == 0) {
                size_t len;

                TEST_HELPER_ASSERT(
                    mbedtls_test_unhexify((unsigned char *) val, strlen(val),
                                          val, &len) == 0);

                int_params_store->len = len;
                *out++ = val;
                *out++ = (char *) (int_params_store++);
            } else {
                ret = (DISPATCH_INVALID_TEST_DATA);
                break;
            }
        } else if (strcmp(type, "exp") == 0) {
            int exp_id = strtol(val, NULL, 10);
            if (get_expression(exp_id, &int_params_store->sint) == 0) {
                *out++ = (char *) int_params_store++;
            } else {
                ret = (DISPATCH_INVALID_TEST_DATA);
                break;
            }
        } else {
            ret = (DISPATCH_INVALID_TEST_DATA);
            break;
        }
    }
    return ret;
}
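
/*
 * Illustrative sketch of how the converted array is consumed (the indices
 * and the reads shown are assumptions for illustration only; the actual
 * readers are the generated test wrappers). After convert_params(), each
 * entry of params is either a char * (for "char*" and the data half of
 * "hex") or a pointer to an mbedtls_test_argument_t (for "int", "exp" and
 * the length half of "hex"):
 *
 *     char *str = (char *) params[0];
 *     intmax_t n = ((mbedtls_test_argument_t *) params[1])->sint;
 */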

/**
 * \brief       Tests the snprintf implementation with one test input.
 *
 * \note
 * At high optimization levels (e.g. gcc -O3), this function may be
 * inlined in run_test_snprintf. This can trigger a spurious warning about
 * potential misuse of snprintf from gcc -Wformat-truncation (observed with
 * gcc 7.2). This warning makes tests in run_test_snprintf redundant on gcc
 * only. They are still valid for other compilers. Avoid this warning by
 * forbidding inlining of this function by gcc.
 *
 * \param n         Buffer test length.
 * \param ref_buf   Expected buffer.
 * \param ref_ret   Expected snprintf return value.
 *
 * \return      0 if the implementation behaves as expected, nonzero otherwise.
 */
#if defined(__GNUC__)
__attribute__((__noinline__))
#endif
static int test_snprintf(size_t n, const char *ref_buf, int ref_ret)
{
    int ret;
    char buf[10] = "xxxxxxxxx";
    const char ref[10] = "xxxxxxxxx";

    if (n >= sizeof(buf)) {
        return -1;
    }
    ret = mbedtls_snprintf(buf, n, "%s", "123");
    if (ret < 0 || (size_t) ret >= n) {
        ret = -1;
    }

    if (strncmp(ref_buf, buf, sizeof(buf)) != 0 ||
        ref_ret != ret ||
        memcmp(buf + n, ref + n, sizeof(buf) - n) != 0) {
        return 1;
    }

    return 0;
}

/**
 * \brief       Tests snprintf implementation.
 *
 * \return      0 for success else 1
 */
static int run_test_snprintf(void)
{
    return test_snprintf(0, "xxxxxxxxx",  -1) != 0 ||
           test_snprintf(1, "",           -1) != 0 ||
           test_snprintf(2, "1",          -1) != 0 ||
           test_snprintf(3, "12",         -1) != 0 ||
           test_snprintf(4, "123",         3) != 0 ||
           test_snprintf(5, "123",         3) != 0;
}
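
/*
 * Illustrative sketch of the behaviour being checked (the buffer and format
 * below are assumptions for illustration only). A C99-conformant snprintf
 * always null-terminates its output when the size is nonzero and returns
 * the length that would have been written without truncation:
 *
 *     char out[4];
 *     int n = snprintf(out, sizeof(out), "%s", "123456");
 *     // On a conforming implementation: out contains "123" and n == 6.
 */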

/** \brief Write the description of the test case to the outcome CSV file.
 *
 * \param outcome_file  The file to write to.
 *                      If this is \c NULL, this function does nothing.
 * \param argv0         The test suite name.
 * \param test_case     The test case description.
 */
static void write_outcome_entry(FILE *outcome_file,
                                const char *argv0,
                                const char *test_case)
{
    /* The non-varying fields are initialized on first use. */
    static const char *platform = NULL;
    static const char *configuration = NULL;
    static const char *test_suite = NULL;

    if (outcome_file == NULL) {
        return;
    }

    if (platform == NULL) {
        platform = getenv("MBEDTLS_TEST_PLATFORM");
        if (platform == NULL) {
            platform = "unknown";
        }
    }
    if (configuration == NULL) {
        configuration = getenv("MBEDTLS_TEST_CONFIGURATION");
        if (configuration == NULL) {
            configuration = "unknown";
        }
    }
    if (test_suite == NULL) {
        test_suite = strrchr(argv0, '/');
        if (test_suite != NULL) {
            test_suite += 1; // skip the '/'
        } else {
            test_suite = argv0;
        }
    }

    /* Write the beginning of the outcome line.
     * Ignore errors: writing the outcome file is on a best-effort basis. */
    mbedtls_fprintf(outcome_file, "%s;%s;%s;%s;",
                    platform, configuration, test_suite, test_case);
}

/** \brief Write the result of the test case to the outcome CSV file.
 *
 * \param outcome_file  The file to write to.
 *                      If this is \c NULL, this function does nothing.
 * \param unmet_dep_count            The number of unmet dependencies.
 * \param unmet_dependencies         The array of unmet dependencies.
 * \param missing_unmet_dependencies Non-zero if there was a problem tracking
 *                                   all unmet dependencies, 0 otherwise.
 * \param ret                        The test dispatch status (DISPATCH_xxx).
 * \param info                       A pointer to the test info structure.
 */
static void write_outcome_result(FILE *outcome_file,
                                 size_t unmet_dep_count,
                                 int unmet_dependencies[],
                                 int missing_unmet_dependencies,
                                 int ret,
                                 const mbedtls_test_info_t *info)
{
    if (outcome_file == NULL) {
        return;
    }

    /* Write the end of the outcome line.
     * Ignore errors: writing the outcome file is on a best-effort basis. */
    switch (ret) {
        case DISPATCH_TEST_SUCCESS:
            if (unmet_dep_count > 0) {
                size_t i;
                mbedtls_fprintf(outcome_file, "SKIP");
                for (i = 0; i < unmet_dep_count; i++) {
                    mbedtls_fprintf(outcome_file, "%c%d",
                                    i == 0 ? ';' : ':',
                                    unmet_dependencies[i]);
                }
                if (missing_unmet_dependencies) {
                    mbedtls_fprintf(outcome_file, ":...");
                }
                break;
            }
            switch (info->result) {
                case MBEDTLS_TEST_RESULT_SUCCESS:
                    mbedtls_fprintf(outcome_file, "PASS;");
                    break;
                case MBEDTLS_TEST_RESULT_SKIPPED:
                    mbedtls_fprintf(outcome_file, "SKIP;Runtime skip");
                    break;
                default:
                    mbedtls_fprintf(outcome_file, "FAIL;%s:%d:%s",
                                    info->filename, info->line_no,
                                    info->test);
                    break;
            }
            break;
        case DISPATCH_TEST_FN_NOT_FOUND:
            mbedtls_fprintf(outcome_file, "FAIL;Test function not found");
            break;
        case DISPATCH_INVALID_TEST_DATA:
            mbedtls_fprintf(outcome_file, "FAIL;Invalid test data");
            break;
        case DISPATCH_UNSUPPORTED_SUITE:
            mbedtls_fprintf(outcome_file, "SKIP;Unsupported suite");
            break;
        default:
            mbedtls_fprintf(outcome_file, "FAIL;Unknown cause");
            break;
    }
    mbedtls_fprintf(outcome_file, "\n");
    fflush(outcome_file);
}
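
/*
 * Illustrative example of a complete outcome line once both halves have been
 * written (the platform, configuration, suite and test case names are
 * assumptions for illustration only):
 *
 *     Linux-x86_64;default;test_suite_version;Version check;PASS;
 */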

/**
 * \brief       Desktop implementation of execute_tests().
 *              Parses the command line and executes tests from the
 *              supplied or default data file.
 *
 * \param argc  Command line argument count.
 * \param argv  Argument array.
 *
 * \return      Program exit status.
 */
int execute_tests(int argc, const char **argv)
{
    /* Local Configurations and options */
    const char *default_filename = "DATA_FILE";
    const char *test_filename = NULL;
    const char **test_files = NULL;
    size_t testfile_count = 0;
    int option_verbose = 0;
    size_t function_id = 0;

    /* Other Local variables */
    int arg_index = 1;
    const char *next_arg;
    size_t testfile_index, i, cnt;
    int ret;
    unsigned total_errors = 0, total_tests = 0, total_skipped = 0;
    FILE *file;
    char buf[5000];
    char *params[50];
    /* Store for processed integer params. */
    mbedtls_test_argument_t int_params[50];
    void *pointer;
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
    int stdout_fd = -1;
#endif /* __unix__ || __APPLE__ && __MACH__ */
    const char *outcome_file_name = getenv("MBEDTLS_TEST_OUTCOME_FILE");
    FILE *outcome_file = NULL;

#if defined(MBEDTLS_MEMORY_BUFFER_ALLOC_C) && \
    !defined(TEST_SUITE_MEMORY_BUFFER_ALLOC)
    unsigned char alloc_buf[1000000];
    mbedtls_memory_buffer_alloc_init(alloc_buf, sizeof(alloc_buf));
#endif

#if defined(MBEDTLS_TEST_MUTEX_USAGE)
    mbedtls_test_mutex_usage_init();
#endif

    /*
     * The C standard doesn't guarantee that all-bits-0 is the representation
     * of a NULL pointer. We do however use that in our code for initializing
     * structures, which should work on every modern platform. Let's be sure.
     */
    memset(&pointer, 0, sizeof(void *));
    if (pointer != NULL) {
        mbedtls_fprintf(stderr, "all-bits-zero is not a NULL pointer\n");
        return 1;
    }

    /*
     * Make sure we have a snprintf that correctly zero-terminates
     */
    if (run_test_snprintf() != 0) {
        mbedtls_fprintf(stderr, "the snprintf implementation is broken\n");
        return 1;
    }

    if (outcome_file_name != NULL && *outcome_file_name != '\0') {
        outcome_file = fopen(outcome_file_name, "a");
        if (outcome_file == NULL) {
            mbedtls_fprintf(stderr, "Unable to open outcome file. Continuing anyway.\n");
        }
    }

    while (arg_index < argc) {
        next_arg = argv[arg_index];

        if (strcmp(next_arg, "--verbose") == 0 ||
            strcmp(next_arg, "-v") == 0) {
            option_verbose = 1;
        } else if (strcmp(next_arg, "--help") == 0 ||
                   strcmp(next_arg, "-h") == 0) {
            mbedtls_fprintf(stdout, USAGE);
            mbedtls_exit(EXIT_SUCCESS);
        } else {
            /* Not an option, therefore treat all further arguments as the file
             * list.
             */
            test_files = &argv[arg_index];
            testfile_count = argc - arg_index;
            break;
        }

        arg_index++;
    }

    /* If no files were specified, assume a default */
    if (test_files == NULL || testfile_count == 0) {
        test_files = &default_filename;
        testfile_count = 1;
    }

    /* Initialize the struct that holds information about the last test */
    mbedtls_test_info_reset();

    /* Now begin to execute the tests in the testfiles */
    for (testfile_index = 0;
         testfile_index < testfile_count;
         testfile_index++) {
        size_t unmet_dep_count = 0;
        int unmet_dependencies[20];
        int missing_unmet_dependencies = 0;

        test_filename = test_files[testfile_index];

        file = fopen(test_filename, "r");
        if (file == NULL) {
            mbedtls_fprintf(stderr, "Failed to open test file: %s\n",
                            test_filename);
            if (outcome_file != NULL) {
                fclose(outcome_file);
            }
            return 1;
        }

        while (!feof(file)) {
            if (unmet_dep_count > 0) {
                mbedtls_fprintf(stderr,
                                "FATAL: Dep count larger than zero at start of loop\n");
                mbedtls_exit(MBEDTLS_EXIT_FAILURE);
            }
            unmet_dep_count = 0;
            missing_unmet_dependencies = 0;

            if ((ret = get_line(file, buf, sizeof(buf))) != 0) {
                break;
            }
            mbedtls_fprintf(stdout, "%s%.66s",
                            mbedtls_test_info.result == MBEDTLS_TEST_RESULT_FAILED ?
                            "\n" : "", buf);
            mbedtls_fprintf(stdout, " ");
            for (i = strlen(buf) + 1; i < 67; i++) {
                mbedtls_fprintf(stdout, ".");
            }
            mbedtls_fprintf(stdout, " ");
            fflush(stdout);
            write_outcome_entry(outcome_file, argv[0], buf);

            total_tests++;

            if ((ret = get_line(file, buf, sizeof(buf))) != 0) {
                break;
            }
            cnt = parse_arguments(buf, strlen(buf), params,
                                  sizeof(params) / sizeof(params[0]));

            if (strcmp(params[0], "depends_on") == 0) {
                for (i = 1; i < cnt; i++) {
                    int dep_id = strtol(params[i], NULL, 10);
                    if (dep_check(dep_id) != DEPENDENCY_SUPPORTED) {
                        if (unmet_dep_count <
                            ARRAY_LENGTH(unmet_dependencies)) {
                            unmet_dependencies[unmet_dep_count] = dep_id;
                            unmet_dep_count++;
                        } else {
                            missing_unmet_dependencies = 1;
                        }
                    }
                }

                if ((ret = get_line(file, buf, sizeof(buf))) != 0) {
                    break;
                }
                cnt = parse_arguments(buf, strlen(buf), params,
                                      sizeof(params) / sizeof(params[0]));
            }

            // If there are no unmet dependencies execute the test
            if (unmet_dep_count == 0) {
                mbedtls_test_info_reset();

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
                /* Suppress all output from the library unless we're in
                 * verbose mode.
                 */
                if (!option_verbose) {
                    stdout_fd = redirect_output(stdout, "/dev/null");
                    if (stdout_fd == -1) {
                        /* Redirection has failed with no stdout so exit */
                        exit(1);
                    }
                }
#endif /* __unix__ || __APPLE__ && __MACH__ */

                function_id = strtoul(params[0], NULL, 10);
                if ((ret = check_test(function_id)) == DISPATCH_TEST_SUCCESS) {
                    ret = convert_params(cnt - 1, params + 1, int_params);
                    if (DISPATCH_TEST_SUCCESS == ret) {
                        ret = dispatch_test(function_id, (void **) (params + 1));
                    }
                }

#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
                if (!option_verbose && restore_output(stdout, stdout_fd)) {
                    /* Redirection has failed with no stdout so exit */
                    exit(1);
                }
#endif /* __unix__ || __APPLE__ && __MACH__ */

            }

            write_outcome_result(outcome_file,
                                 unmet_dep_count, unmet_dependencies,
                                 missing_unmet_dependencies,
                                 ret, &mbedtls_test_info);
            if (unmet_dep_count > 0 || ret == DISPATCH_UNSUPPORTED_SUITE) {
                total_skipped++;
                mbedtls_fprintf(stdout, "----");

                if (1 == option_verbose && ret == DISPATCH_UNSUPPORTED_SUITE) {
                    mbedtls_fprintf(stdout, "\n   Test Suite not enabled");
                }

                if (1 == option_verbose && unmet_dep_count > 0) {
                    mbedtls_fprintf(stdout, "\n   Unmet dependencies: ");
                    for (i = 0; i < unmet_dep_count; i++) {
                        mbedtls_fprintf(stdout, "%d ",
                                        unmet_dependencies[i]);
                    }
                    if (missing_unmet_dependencies) {
                        mbedtls_fprintf(stdout, "...");
                    }
                }
                mbedtls_fprintf(stdout, "\n");
                fflush(stdout);

                unmet_dep_count = 0;
                missing_unmet_dependencies = 0;
            } else if (ret == DISPATCH_TEST_SUCCESS) {
                if (mbedtls_test_info.result == MBEDTLS_TEST_RESULT_SUCCESS) {
                    mbedtls_fprintf(stdout, "PASS\n");
                } else if (mbedtls_test_info.result == MBEDTLS_TEST_RESULT_SKIPPED) {
                    mbedtls_fprintf(stdout, "----\n");
                    total_skipped++;
                } else {
                    total_errors++;
                    mbedtls_fprintf(stdout, "FAILED\n");
                    mbedtls_fprintf(stdout, "  %s\n  at ",
                                    mbedtls_test_info.test);
                    if (mbedtls_test_info.step != (unsigned long) (-1)) {
                        mbedtls_fprintf(stdout, "step %lu, ",
                                        mbedtls_test_info.step);
                    }
                    mbedtls_fprintf(stdout, "line %d, %s",
                                    mbedtls_test_info.line_no,
                                    mbedtls_test_info.filename);
                    if (mbedtls_test_info.line1[0] != 0) {
                        mbedtls_fprintf(stdout, "\n  %s",
                                        mbedtls_test_info.line1);
                    }
                    if (mbedtls_test_info.line2[0] != 0) {
                        mbedtls_fprintf(stdout, "\n  %s",
                                        mbedtls_test_info.line2);
                    }
                }
                fflush(stdout);
            } else if (ret == DISPATCH_INVALID_TEST_DATA) {
                mbedtls_fprintf(stderr, "FAILED: FATAL PARSE ERROR\n");
                fclose(file);
                mbedtls_exit(2);
            } else if (ret == DISPATCH_TEST_FN_NOT_FOUND) {
                mbedtls_fprintf(stderr, "FAILED: FATAL TEST FUNCTION NOT FOUND\n");
                fclose(file);
                mbedtls_exit(2);
            } else {
                total_errors++;
            }
        }
        fclose(file);
    }

    if (outcome_file != NULL) {
        fclose(outcome_file);
    }

    mbedtls_fprintf(stdout,
                    "\n----------------------------------------------------------------------------\n\n");
    if (total_errors == 0) {
        mbedtls_fprintf(stdout, "PASSED");
    } else {
        mbedtls_fprintf(stdout, "FAILED");
    }

    mbedtls_fprintf(stdout, " (%u / %u tests (%u skipped))\n",
                    total_tests - total_errors, total_tests, total_skipped);

#if defined(MBEDTLS_MEMORY_BUFFER_ALLOC_C) && \
    !defined(TEST_SUITE_MEMORY_BUFFER_ALLOC)
#if defined(MBEDTLS_MEMORY_DEBUG)
    mbedtls_memory_buffer_alloc_status();
#endif
    mbedtls_memory_buffer_alloc_free();
#endif

    return total_errors != 0;
}
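
/*
 * Illustrative sketch of the data file consumed by the loop above (the
 * identifiers and values are assumptions for illustration only). Each test
 * case is a description line, an optional "depends_on" line listing numeric
 * dependency identifiers, and a line holding the test function number
 * followed by its typed parameters:
 *
 *     Test something interesting
 *     depends_on:0:3
 *     7:char*:"hello":int:42:exp:1
 */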