// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for API documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
23
24#include <linux/export.h>
25#include <linux/cpumask.h>
26#include <linux/err.h>
27#include <linux/cpu.h>
28#include <linux/padata.h>
29#include <linux/mutex.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/sysfs.h>
33#include <linux/rcupdate.h>
34#include <linux/module.h>
35
36#define MAX_OBJ_NUM 1000
37
Olivier Deprez0e641232021-09-23 10:07:05 +020038static void padata_free_pd(struct parallel_data *pd);
39
Andrew Scullb4b6d4a2019-01-02 15:54:55 +000040static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
41{
42 int cpu, target_cpu;
43
44 target_cpu = cpumask_first(pd->cpumask.pcpu);
45 for (cpu = 0; cpu < cpu_index; cpu++)
46 target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
47
48 return target_cpu;
49}
50
static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence number to a CPU by taking
	 * seq_nr mod the number of CPUs in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

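/*
 * Worked example (illustrative): with pd->cpumask.pcpu = {2, 5, 7},
 * cpumask_weight() is 3, so sequence numbers 0, 1, 2, 3, 4, ... hash to
 * cpu_index 0, 1, 2, 0, 1, ... and padata_index_to_cpu() maps those to
 * CPUs 2, 5, 7, 2, 5, ... Objects are thus spread round-robin over the
 * parallel cpumask.
 */
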
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @ps's padata
 *          instance (i.e. cpumask.cbcpu), this function selects a fallback
 *          CPU and if none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
	target_cpu = padata_cpu_hash(pd, padata->seq_nr);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work(pinst->parallel_wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if the next object that needs serialization will
 * be parallel processed by another cpu and is not yet present in
 * the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	next_queue = per_cpu_ptr(pd->pqueue, cpu);
	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Check for the rare case where two or more parallel jobs have hashed
	 * to the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

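/*
 * Worked example (illustrative): with two parallel CPUs A and B, sequence
 * numbers 0, 2, 4, ... hash to A and 1, 3, 5, ... to B. pd->processed
 * holds the sequence number of the next object to be serialized and
 * pd->cpu the CPU on whose reorder queue it will appear, so
 * padata_find_next() only ever has to look at a single queue.
 */
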
static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_parallel_queue *next_queue;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb__after_atomic in padata_do_serial.
	 */
	smp_mb();

	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
	if (!list_empty(&next_queue->reorder.list) &&
	    padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
							   padata->cpu);
	struct padata_priv *cur;

	spin_lock(&pqueue->reorder.lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	atomic_inc(&pd->reorder_objects);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

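/*
 * Example usage (an illustrative sketch only, not used by padata itself):
 * a caller embeds struct padata_priv in its own job structure, fills in
 * the ->parallel and ->serial callbacks and submits the job with
 * padata_do_parallel(). The names my_job, my_parallel, my_serial and
 * my_submit are hypothetical; the embedded padata_priv is assumed to be
 * zero-initialized (e.g. by kzalloc() of the containing job).
 */
struct my_job {
	struct padata_priv padata;
	/* caller-private input and output would live here */
};

static void my_serial(struct padata_priv *padata)
{
	struct my_job *job = container_of(padata, struct my_job, padata);

	/* Jobs arrive here one at a time, in submission order, BHs off. */
	kfree(job);
}

static void my_parallel(struct padata_priv *padata)
{
	struct my_job *job = container_of(padata, struct my_job, padata);

	/* The heavy per-job work would run here, spread over the pcpu mask. */
	(void)job;

	/* Every parallelized job must be handed to padata_do_serial(). */
	padata_do_serial(padata);
}

static __maybe_unused int my_submit(struct padata_shell *ps,
				    struct my_job *job, int *cb_cpu)
{
	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;

	/* May return -EBUSY or -EINVAL; the caller must handle both. */
	return padata_do_parallel(ps, &job->padata, cb_cpu);
}
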
static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pinst->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static int pd_setup_cpumasks(struct parallel_data *pd,
			     const struct cpumask *pcpumask,
			     const struct cpumask *cbcpumask)
{
	int err = -ENOMEM;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto free_pcpu_mask;

	cpumask_copy(pd->cpumask.pcpu, pcpumask);
	cpumask_copy(pd->cpumask.cbcpu, cbcpumask);

	return 0;

free_pcpu_mask:
	free_cpumask_var(pd->cpumask.pcpu);
out:
	return err;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	const struct cpumask *cbcpumask;
	const struct cpumask *pcpumask;
	struct parallel_data *pd;

	cbcpumask = pinst->rcpumask.cbcpu;
	pcpumask = pinst->rcpumask.pcpu;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;

	pd->ps = ps;
	if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	int notification_mask = 0;
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	cpumask_copy(pinst->omask, pinst->rcpumask.pcpu);
	cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
		    cpu_online_mask);
	if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;

	cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu);
	cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
		    cpu_online_mask);
	if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (atomic_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pinst->cpumask);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either the pcpu or cbcpu cpumask, or
 *                                    both, change.
 *
 * @pinst: A pointer to the padata instance
 * @nblock: A pointer to the notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier
 *
 * @pinst: A pointer to the padata instance.
 * @nblock: A pointer to the notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);

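/*
 * Example (an illustrative sketch only; my_cpumask_change and
 * my_cpumask_nb are hypothetical): the notifier is called with the
 * PADATA_CPU_PARALLEL/PADATA_CPU_SERIAL bits that changed and a pointer
 * to the instance's struct padata_cpumask. Register it with
 * padata_register_cpumask_notifier(pinst, &my_cpumask_nb).
 */
static int my_cpumask_change(struct notifier_block *nb, unsigned long mask,
			     void *data)
{
	struct padata_cpumask *cpumask = data;

	if (mask & PADATA_CPU_PARALLEL)
		pr_debug("parallel cpumask is now %*pb\n",
			 nr_cpu_ids, cpumask_bits(cpumask->pcpu));
	if (mask & PADATA_CPU_SERIAL)
		pr_debug("serial cpumask is now %*pb\n",
			 nr_cpu_ids, cpumask_bits(cpumask->cbcpu));

	return NOTIFY_OK;
}

static struct notifier_block my_cpumask_nb __maybe_unused = {
	.notifier_call = my_cpumask_change,
};
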
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask specified by @cpumask_type to the
 *                      value of @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, corresponding
 *                to the serial and parallel cpumasks respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

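/*
 * Example (an illustrative sketch only; my_restrict_parallel is
 * hypothetical): restrict the parallel workers of @pinst to the CPUs set
 * in @mask.
 */
static int __maybe_unused my_restrict_parallel(struct padata_instance *pinst,
					       const struct cpumask *mask)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(new_mask, mask);
	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, new_mask);
	free_cpumask_var(new_mask);

	return err;
}
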
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	padata_stop(pinst);
	free_cpumask_var(pinst->omask);
	free_cpumask_var(pinst->rcpumask.cbcpu);
	free_cpumask_var(pinst->rcpumask.pcpu);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

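/*
 * Example (assuming the padata user has added pinst->kobj under sysfs,
 * as pcrypt does for its pencrypt/pdecrypt instances under
 * /sys/kernel/pcrypt/):
 *
 *   # echo 0f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *   # cat /sys/kernel/pcrypt/pencrypt/serial_cpumask
 *
 * The masks are parsed by bitmap_parse(), i.e. as comma-separated hex
 * words.
 */
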
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @name: used to identify the instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
static struct padata_instance *padata_alloc(const char *name,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	get_online_cpus();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
		goto err_free_masks;
	if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
		goto err_free_rcpumask_pcpu;
	if (!alloc_cpumask_var(&pinst->omask, GFP_KERNEL))
		goto err_free_rcpumask_cbcpu;

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
	cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
	cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_omask;

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	put_online_cpus();

	return pinst;

err_free_omask:
	free_cpumask_var(pinst->omask);
err_free_rcpumask_cbcpu:
	free_cpumask_var(pinst->rcpumask.cbcpu);
err_free_rcpumask_pcpu:
	free_cpumask_var(pinst->rcpumask.pcpu);
err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	put_online_cpus();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @name: used to identify the instance
 */
struct padata_instance *padata_alloc_possible(const char *name)
{
	return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	get_online_cpus();
	pd = padata_alloc_pd(ps);
	put_online_cpus();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;

	mutex_lock(&pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

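/*
 * Example lifecycle (an illustrative sketch only; my_pinst, my_ps,
 * my_setup and my_teardown are hypothetical): allocate an instance,
 * attach a shell and start processing; tear down in reverse order.
 */
static struct padata_instance *my_pinst;
static struct padata_shell *my_ps;

static int __maybe_unused my_setup(void)
{
	int err = -ENOMEM;

	my_pinst = padata_alloc_possible("my_inst");
	if (!my_pinst)
		return err;

	my_ps = padata_alloc_shell(my_pinst);
	if (!my_ps)
		goto err_free_inst;

	err = padata_start(my_pinst);
	if (err)
		goto err_free_shell;

	return 0;

err_free_shell:
	padata_free_shell(my_ps);
err_free_inst:
	padata_free(my_pinst);
	return err;
}

static void __maybe_unused my_teardown(void)
{
	padata_stop(my_pinst);
	padata_free_shell(my_ps);
	padata_free(my_pinst);
}
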
#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		return ret;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0) {
		cpuhp_remove_multi_state(hp_online);
		return ret;
	}
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif