/*
 * Copyright The Transfer List Library Contributors
 *
 * SPDX-License-Identifier: MIT OR GPL-2.0-or-later
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "test.h"
#include "transfer_list.h"
#include "unity.h"

void *buffer = NULL;
char *test_page_data = NULL;

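/*
 * Byte-wise 8-bit sum of a buffer. The library maintains the TL checksum so
 * that the sum over the whole list is zero; the tests assert
 * byte_sum() == 0 after every operation that modifies the list.
 */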
uint8_t byte_sum(const char *ptr, size_t len)
{
	uint8_t sum = 0;

	for (size_t i = 0; i < len; i++) {
		sum += ptr[i];
	}

	return sum;
}

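/*
 * Add num_entries TEs tagged tag_base .. tag_base + num_entries - 1, each
 * carrying te_data_size bytes of test_page_data, and return the created
 * entries through the entries array. The checksum is verified after every
 * addition.
 */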
static void setup_test_entries(struct transfer_list_header *tl,
			       unsigned int tag_base, unsigned int num_entries,
			       struct transfer_list_entry **entries)
{
	unsigned int i;
	unsigned int te_data_size;
	struct transfer_list_entry *te;

	te_data_size = tl->max_size / (num_entries * 2);

	for (i = 0; i < num_entries; i++) {
		TEST_ASSERT(transfer_list_add(tl, tag_base + i, te_data_size,
					      test_page_data));
		TEST_ASSERT(te = transfer_list_find(tl, tag_base + i));
		TEST_ASSERT(byte_sum((void *)tl, tl->size) == 0);
		entries[i] = te;
	}
}

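/*
 * Exercise transfer_list_add(): add a basic TE, reject a TE larger than the
 * TL, accept the boundary tag values, and reject out-of-bounds tags.
 */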
void test_add(void)
{
	struct transfer_list_header *tl;
	struct transfer_list_entry *te;

	TEST_ASSERT(tl = transfer_list_init((void *)buffer, TL_SIZE));

	TEST_ASSERT(te = transfer_list_add(tl, test_tag, sizeof(test_data),
					   &test_data));
	TEST_ASSERT_EQUAL(0, byte_sum((char *)tl, tl->max_size));
	TEST_ASSERT(*(int *)transfer_list_entry_data(te) == test_data);

	/* Try to add a TE larger than the allocated TL space. */
	TEST_ASSERT_NULL(te = transfer_list_add(tl, 2, TL_SIZE, &test_data));
	TEST_ASSERT_EQUAL(0, byte_sum((char *)tl, tl->max_size));
	TEST_ASSERT_NULL(transfer_list_find(tl, 0x2));

	unsigned int tags[4] = { TAG_GENERIC_START, TAG_GENERIC_END,
				 TAG_NON_STANDARD_START, TAG_NON_STANDARD_END };

	for (size_t i = 0; i < 4; i++) {
		TEST_ASSERT(te = transfer_list_add(tl, tags[i],
						   sizeof(test_data),
						   &test_data));
		TEST_ASSERT_EQUAL(0, byte_sum((char *)tl, tl->max_size));
		TEST_ASSERT(te = transfer_list_find(tl, tags[i]));
		TEST_ASSERT(*(int *)transfer_list_entry_data(te) == test_data);
	}

	transfer_list_dump(tl);

	/* Adding out-of-bounds tags should fail. */
	TEST_ASSERT_NULL(
		transfer_list_add(tl, 1 << 24, sizeof(test_data), &test_data));

	TEST_ASSERT_NULL(
		transfer_list_add(tl, -1, sizeof(test_data), &test_data));
}

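/*
 * Exercise transfer_list_add_with_align(): for increasing alignment
 * requirements, check that the TE data is aligned as requested and that the
 * TL's alignment field keeps up.
 */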
void test_add_with_align(void)
{
	struct transfer_list_header *tl =
		transfer_list_init(buffer, TL_MAX_SIZE);
	struct transfer_list_entry *te;

	unsigned int test_id = 1;
	const unsigned int entry_size = 0xff;
	int *data;

	TEST_ASSERT(tl->size == tl->hdr_size);

	/*
	 * When a new TE requires a larger alignment than any existing TE, it
	 * should still be added and the TL's alignment field updated to match.
	 */
	for (char align = 0; align < (1 << 4); align++, test_id++) {
		TEST_ASSERT(
			te = transfer_list_add_with_align(
				tl, test_id, entry_size, &test_data, align));
		TEST_ASSERT(tl->alignment >= align);
		TEST_ASSERT(te = transfer_list_find(tl, test_id));
		TEST_ASSERT(data = transfer_list_entry_data(te));
		TEST_ASSERT_FALSE((uintptr_t)data % (1 << align));
		TEST_ASSERT_EQUAL(*(int *)data, test_data);
	}
}

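/*
 * Exercise transfer_list_rem(): remove three TEs in first/last/middle order
 * and check that a single TL_TAG_EMPTY entry remains at the end of the list.
 */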
void test_rem(void)
{
	struct transfer_list_header *tl = transfer_list_init(buffer, TL_SIZE);
	struct transfer_list_entry *te[3];
	unsigned int tag_base = test_tag;

	TEST_ASSERT_EQUAL(tl->size, tl->hdr_size);

	setup_test_entries(tl, tag_base, 3, te);

	/* Remove TE1 and make sure it is no longer present in the TL. */
	TEST_ASSERT_TRUE(transfer_list_rem(tl, te[0]));
	TEST_ASSERT(byte_sum((void *)tl, tl->size) == 0);
	TEST_ASSERT_NULL(transfer_list_find(tl, tag_base));

	/* Remove TE3 and make sure it is no longer present in the TL. */
	TEST_ASSERT_TRUE(transfer_list_rem(tl, te[2]));
	TEST_ASSERT(byte_sum((void *)tl, tl->size) == 0);
	TEST_ASSERT_NULL(transfer_list_find(tl, tag_base + 2));

	/* Remove TE2 and make sure it is no longer present in the TL. */
	TEST_ASSERT_TRUE(transfer_list_rem(tl, te[1]));
	TEST_ASSERT(byte_sum((void *)tl, tl->size) == 0);
	TEST_ASSERT_NULL(transfer_list_find(tl, tag_base + 1));

	/* Only a single TL_TAG_EMPTY entry should remain. */
	TEST_ASSERT(te[0] = transfer_list_find(tl, TL_TAG_EMPTY));
	TEST_ASSERT(transfer_list_next(tl, te[0]) == NULL);
}

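/*
 * Exercise transfer_list_set_data_size(): growing a TE into an adjacent
 * TL_TAG_EMPTY entry must not grow the TL, while growing it beyond that free
 * space must.
 */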
void test_set_data_size(void)
{
	struct transfer_list_header *tl = transfer_list_init(buffer, TL_SIZE);
	struct transfer_list_entry *te[3];
	unsigned int tag_base = test_tag;
	unsigned int tl_size;

	TEST_ASSERT_EQUAL(tl->size, tl->hdr_size);

	setup_test_entries(tl, tag_base, 3, te);

	/* Remove TE2 and make sure it is no longer present in the TL. */
	TEST_ASSERT_TRUE(transfer_list_rem(tl, te[1]));
	TEST_ASSERT(byte_sum((void *)tl, tl->size) == 0);
	TEST_ASSERT_NULL(transfer_list_find(tl, tag_base + 1));

	/*
	 * Grow TE1's data size to tl->max_size / 4. This should not grow the
	 * transfer list itself, since the entry can expand into the adjacent
	 * TL_TAG_EMPTY entry left behind by the removal.
	 */
	tl_size = tl->size;
	TEST_ASSERT(transfer_list_set_data_size(tl, te[0], tl->max_size / 4));
	TEST_ASSERT(byte_sum((void *)tl, tl->size) == 0);
	TEST_ASSERT(te[0]->data_size == tl->max_size / 4);
	TEST_ASSERT(tl_size == tl->size);

	/*
	 * Grow TE1's data size to tl->max_size / 2; this time the transfer
	 * list itself must grow.
	 */
	tl_size = tl->size;
	TEST_ASSERT(transfer_list_set_data_size(tl, te[0], tl->max_size / 2));
	TEST_ASSERT(byte_sum((void *)tl, tl->size) == 0);
	TEST_ASSERT(te[0]->data_size == tl->max_size / 2);
	TEST_ASSERT(tl_size < tl->size);
}

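/* Unity fixtures: allocate the TL buffer and a 0xff-filled payload per test. */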
void setUp(void)
{
	buffer = malloc(TL_MAX_SIZE);
	test_page_data = malloc(TL_SIZE);
	memset(test_page_data, 0xff, TL_SIZE);
}

void tearDown(void)
{
	free(test_page_data);
	free(buffer);
	test_page_data = NULL;
	buffer = NULL;
}

int main(void)
{
	UNITY_BEGIN();
	RUN_TEST(test_add);
	RUN_TEST(test_add_with_align);
	RUN_TEST(test_rem);
	RUN_TEST(test_set_data_size);
	return UNITY_END();
}