// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */

#include <linux/raid/pq.h>
#ifndef __KERNEL__
/* Userspace test harness build: mmap/stdio instead of kernel services. */
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
#endif

/*
 * The gen/xor syndrome implementation selected at init time; filled in by
 * raid6_choose_gen() with the winning entry of raid6_algos[].
 */
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
30
/*
 * Candidate syndrome implementations, grouped per architecture and listed
 * roughly fastest-first within each group; raid6_choose_gen() walks this
 * NULL-terminated table.  The generic raid6_intxN entries at the end are
 * always present as a portable fallback.
 */
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x2,
	&raid6_sse2x1,
	&raid6_sse1x2,
	&raid6_sse1x1,
	&raid6_mmxx2,
	&raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
	&raid6_avx512x4,
	&raid6_avx512x2,
	&raid6_avx512x1,
#endif
	&raid6_avx2x4,
	&raid6_avx2x2,
	&raid6_avx2x1,
	&raid6_sse2x4,
	&raid6_sse2x2,
	&raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
	&raid6_vpermxor8,
	&raid6_vpermxor4,
	&raid6_vpermxor2,
	&raid6_vpermxor1,
	&raid6_altivec8,
	&raid6_altivec4,
	&raid6_altivec2,
	&raid6_altivec1,
#endif
#if defined(CONFIG_S390)
	&raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
	&raid6_neonx8,
	&raid6_neonx4,
	&raid6_neonx2,
	&raid6_neonx1,
#endif
#if defined(__ia64__)
	&raid6_intx32,
	&raid6_intx16,
#endif
	&raid6_intx8,
	&raid6_intx4,
	&raid6_intx2,
	&raid6_intx1,
	NULL
};
88
/* Recover two data blocks; installed by raid6_choose_recov(). */
void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

/* Recover one data block plus P; installed by raid6_choose_recov(). */
void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
94
/*
 * Candidate recovery implementations; raid6_choose_recov() walks this
 * NULL-terminated table and keeps the highest-priority valid entry.
 * raid6_recov_intx1 is the portable fallback.
 */
const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
	&raid6_recov_avx512,
#endif
	&raid6_recov_avx2,
	&raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
	&raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
	&raid6_recov_neon,
#endif
	&raid6_recov_intx1,
	NULL
};
112
/* Each benchmark pass runs for 2^RAID6_TIME_JIFFIES_LG2 jiffies. */
#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2 4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2 9
#define time_before(x, y) ((x) < (y))
#endif

/* Benchmark scratch area: 8 disks, one page each = 2^3 pages. */
#define RAID6_TEST_DISKS 8
#define RAID6_TEST_DISKS_ORDER 3
123
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000124static inline const struct raid6_recov_calls *raid6_choose_recov(void)
125{
126 const struct raid6_recov_calls *const *algo;
127 const struct raid6_recov_calls *best;
128
129 for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
130 if (!best || (*algo)->priority > best->priority)
131 if (!(*algo)->valid || (*algo)->valid())
132 best = *algo;
133
134 if (best) {
135 raid6_2data_recov = best->data2;
136 raid6_datap_recov = best->datap;
137
138 pr_info("raid6: using %s recovery algorithm\n", best->name);
139 } else
140 pr_err("raid6: Yikes! No recovery algorithm found!\n");
141
142 return best;
143}
144
145static inline const struct raid6_calls *raid6_choose_gen(
Olivier Deprez157378f2022-04-04 15:47:50 +0200146 void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000147{
148 unsigned long perf, bestgenperf, bestxorperf, j0, j1;
149 int start = (disks>>1)-1, stop = disks-3; /* work on the second half of the disks */
150 const struct raid6_calls *const *algo;
151 const struct raid6_calls *best;
152
153 for (bestgenperf = 0, bestxorperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
154 if (!best || (*algo)->prefer >= best->prefer) {
155 if ((*algo)->valid && !(*algo)->valid())
156 continue;
157
David Brazdil0f672f62019-12-10 10:32:29 +0000158 if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
159 best = *algo;
160 break;
161 }
162
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000163 perf = 0;
164
165 preempt_disable();
166 j0 = jiffies;
167 while ((j1 = jiffies) == j0)
168 cpu_relax();
169 while (time_before(jiffies,
170 j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
171 (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
172 perf++;
173 }
174 preempt_enable();
175
176 if (perf > bestgenperf) {
177 bestgenperf = perf;
178 best = *algo;
179 }
180 pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
Olivier Deprez157378f2022-04-04 15:47:50 +0200181 (perf * HZ * (disks-2)) >>
182 (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000183
184 if (!(*algo)->xor_syndrome)
185 continue;
186
187 perf = 0;
188
189 preempt_disable();
190 j0 = jiffies;
191 while ((j1 = jiffies) == j0)
192 cpu_relax();
193 while (time_before(jiffies,
194 j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
195 (*algo)->xor_syndrome(disks, start, stop,
196 PAGE_SIZE, *dptrs);
197 perf++;
198 }
199 preempt_enable();
200
201 if (best == *algo)
202 bestxorperf = perf;
203
204 pr_info("raid6: %-8s xor() %5ld MB/s\n", (*algo)->name,
Olivier Deprez157378f2022-04-04 15:47:50 +0200205 (perf * HZ * (disks-2)) >>
206 (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000207 }
208 }
209
210 if (best) {
Olivier Deprez157378f2022-04-04 15:47:50 +0200211 if (IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
212 pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
213 best->name,
214 (bestgenperf * HZ * (disks-2)) >>
215 (20 - PAGE_SHIFT+RAID6_TIME_JIFFIES_LG2));
216 if (best->xor_syndrome)
217 pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
218 (bestxorperf * HZ * (disks-2)) >>
219 (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
220 } else
221 pr_info("raid6: skip pq benchmark and using algorithm %s\n",
222 best->name);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000223 raid6_call = *best;
224 } else
225 pr_err("raid6: Yikes! No algorithm found!\n");
226
227 return best;
228}
229
230
/* Try to pick the best algorithm */
/* This code uses the gfmul table as convenient data set to abuse */

/*
 * Module/boot entry point: benchmark and select the syndrome generation
 * and recovery implementations.
 *
 * Allocates one page per test disk, fills the data pages by cycling the
 * raid6_gfmul table through them, runs the selection routines, and frees
 * the scratch pages again.  Returns 0 on success, -ENOMEM if the scratch
 * area cannot be allocated, or -EINVAL if no usable algorithm was found.
 */
int __init raid6_select_algo(void)
{
	const int disks = RAID6_TEST_DISKS;

	const struct raid6_calls *gen_best;
	const struct raid6_recov_calls *rec_best;
	char *disk_ptr, *p;
	void *dptrs[RAID6_TEST_DISKS];
	int i, cycle;

	/* prepare the buffer and fill it circularly with gfmul table */
	disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
	if (!disk_ptr) {
		pr_err("raid6: Yikes! No memory available.\n");
		return -ENOMEM;
	}

	/* Carve the allocation into one page-sized buffer per disk. */
	p = disk_ptr;
	for (i = 0; i < disks; i++)
		dptrs[i] = p + PAGE_SIZE * i;

	/*
	 * Copy 64 KiB chunks (presumably sizeof(raid6_gfmul) — TODO confirm
	 * against pq.h) until the data-disk pages are filled; only the
	 * disks-2 data pages need content, P/Q are outputs.
	 */
	cycle = ((disks - 2) * PAGE_SIZE) / 65536;
	for (i = 0; i < cycle; i++) {
		memcpy(p, raid6_gfmul, 65536);
		p += 65536;
	}

	/* Fill the remaining partial chunk, if any. */
	if ((disks - 2) * PAGE_SIZE % 65536)
		memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);

	/* select raid gen_syndrome function */
	gen_best = raid6_choose_gen(&dptrs, disks);

	/* select raid recover functions */
	rec_best = raid6_choose_recov();

	free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);

	return gen_best && rec_best ? 0 : -EINVAL;
}
274
/* Module unload hook: nothing to tear down; exists so unload is permitted. */
static void raid6_exit(void)
{
}
279
/* Run algorithm selection early in boot, at subsys initcall time. */
subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");