// SPDX-License-Identifier: GPL-2.0-only
/*
 * iteration_check.c: test races having to do with xarray iteration
 * Copyright (c) 2016 Intel Corporation
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */
#include <pthread.h>
#include "test.h"

#define NUM_THREADS 5
#define MAX_IDX 100
#define TAG XA_MARK_0
#define NEW_TAG XA_MARK_1

static pthread_t threads[NUM_THREADS];
static unsigned int seeds[3];
static DEFINE_XARRAY(array);
static bool test_complete;
static int max_order;

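/*
 * Insert a tagged item at @index, preferring the largest order that does
 * not conflict with an existing entry.  Retry on allocation failure; if
 * every order conflicts, the item is never stored and is freed instead.
 */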
void my_item_insert(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	struct item *item = item_create(index, 0);
	int order;

retry:
	xas_lock(&xas);
	for (order = max_order; order >= 0; order--) {
		xas_set_order(&xas, index, order);
		item->order = order;
		if (xas_find_conflict(&xas))
			continue;
		xas_store(&xas, item);
		xas_set_mark(&xas, TAG);
		break;
	}
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL))
		goto retry;
	if (order < 0)
		free(item);
}

/* relentlessly fill the array with tagged entries */
static void *add_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		unsigned long pgoff;

		for (pgoff = 0; pgoff < MAX_IDX; pgoff++) {
			my_item_insert(&array, pgoff);
		}
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over tagged entries, retrying when we find ourselves in a deleted
 * node and randomly pausing the iteration.
 */
static void *tagged_iteration_fn(void *arg)
{
	XA_STATE(xas, &array, 0);
	void *entry;

	rcu_register_thread();

	while (!test_complete) {
		xas_set(&xas, 0);
		rcu_read_lock();
		xas_for_each_marked(&xas, entry, ULONG_MAX, TAG) {
			if (xas_retry(&xas, entry))
				continue;

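			/*
			 * Occasionally pause the walk: xas_pause() lets us
			 * drop the RCU read lock safely, and rcu_barrier()
			 * waits for pending callbacks so nodes freed by the
			 * removal thread are reclaimed while we are paused.
			 */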
			if (rand_r(&seeds[0]) % 50 == 0) {
				xas_pause(&xas);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Iterate over the entries, retrying when we find ourselves in a deleted
 * node and randomly pausing the iteration.
 */
static void *untagged_iteration_fn(void *arg)
{
	XA_STATE(xas, &array, 0);
	void *entry;

	rcu_register_thread();

	while (!test_complete) {
		xas_set(&xas, 0);
		rcu_read_lock();
		xas_for_each(&xas, entry, ULONG_MAX) {
			if (xas_retry(&xas, entry))
				continue;

			if (rand_r(&seeds[1]) % 50 == 0) {
				xas_pause(&xas);
				rcu_read_unlock();
				rcu_barrier();
				rcu_read_lock();
			}
		}
		rcu_read_unlock();
	}

	rcu_unregister_thread();

	return NULL;
}

/*
 * Randomly remove entries to help induce retries in the
 * two iteration functions.
 */
static void *remove_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		int pgoff;
		struct item *item;

		pgoff = rand_r(&seeds[2]) % MAX_IDX;

		item = xa_erase(&array, pgoff);
		if (item)
			item_free(item, pgoff);
	}

	rcu_unregister_thread();

	return NULL;
}

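/*
 * Repeatedly copy TAG to NEW_TAG across the whole index range, in small
 * batches, to race mark propagation against the other threads.
 */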
static void *tag_entries_fn(void *arg)
{
	rcu_register_thread();

	while (!test_complete) {
		tag_tagged_items(&array, 0, MAX_IDX, 10, TAG, NEW_TAG);
	}
	rcu_unregister_thread();
	return NULL;
}

/* This is a unit test for a bug found by the syzkaller tester */
void iteration_test(unsigned order, unsigned test_duration)
{
	int i;

	printv(1, "Running %siteration tests for %d seconds\n",
			order > 0 ? "multiorder " : "", test_duration);

	max_order = order;
	test_complete = false;

	for (i = 0; i < 3; i++)
		seeds[i] = rand();

	if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
		perror("create tagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
		perror("create untagged iteration thread");
		exit(1);
	}
	if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
		perror("create add entry thread");
		exit(1);
	}
	if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
		perror("create remove entry thread");
		exit(1);
	}
	if (pthread_create(&threads[4], NULL, tag_entries_fn, NULL)) {
		perror("create tag entry thread");
		exit(1);
	}

	sleep(test_duration);
	test_complete = true;

	for (i = 0; i < NUM_THREADS; i++) {
		if (pthread_join(threads[i], NULL)) {
			perror("pthread_join");
			exit(1);
		}
	}

	item_kill_tree(&array);
}