| Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 
|  | 2 | /* | 
|  | 3 | * kernel/workqueue_internal.h | 
|  | 4 | * | 
|  | 5 | * Workqueue internal header file.  Only to be included by workqueue and | 
|  | 6 | * core kernel subsystems. | 
|  | 7 | */ | 
|  | 8 | #ifndef _KERNEL_WORKQUEUE_INTERNAL_H | 
|  | 9 | #define _KERNEL_WORKQUEUE_INTERNAL_H | 
|  | 10 |  | 
|  | 11 | #include <linux/workqueue.h> | 
|  | 12 | #include <linux/kthread.h> | 
|  | 13 | #include <linux/preempt.h> | 
|  | 14 |  | 
|  | 15 | struct worker_pool; | 
|  | 16 |  | 
|  | 17 | /* | 
|  | 18 | * The poor guys doing the actual heavy lifting.  All on-duty workers are | 
|  | 19 | * either serving the manager role, on idle list or on busy hash.  For | 
|  | 20 | * details on the locking annotation (L, I, X...), refer to workqueue.c. | 
|  | 21 | * | 
|  | 22 | * Only to be used in workqueue and async. | 
|  | 23 | */ | 
/*
 * Per-worker state for a concurrency-managed workqueue worker thread.
 *
 * Locking-annotation legend (defined in workqueue.c):
 *   L: protected by the pool's lock
 *   I: set during initialization, read-only afterwards
 *   A: protected while holding the pool attach mutex
 *   X: special rules, see workqueue.c
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
	struct list_head	scheduled;	/* L: scheduled works */

	/* 64 bytes boundary on 64bit, 32 on 32bit */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* A: the associated pool */
						/* L: for rescuers */
	struct list_head	node;		/* A: anchored at pool->workers */
						/* A: runs through worker->node */

	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */
	int			sleeping;	/* None */

	/*
	 * Opaque string set with work_set_desc().  Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */

	/* used by the scheduler to determine a worker's last known identity */
	work_func_t		last_func;
};
|  | 61 |  | 
|  | 62 | /** | 
|  | 63 | * current_wq_worker - return struct worker if %current is a workqueue worker | 
|  | 64 | */ | 
|  | 65 | static inline struct worker *current_wq_worker(void) | 
|  | 66 | { | 
|  | 67 | if (in_task() && (current->flags & PF_WQ_WORKER)) | 
|  | 68 | return kthread_data(current); | 
|  | 69 | return NULL; | 
|  | 70 | } | 
|  | 71 |  | 
|  | 72 | /* | 
|  | 73 | * Scheduler hooks for concurrency managed workqueue.  Only to be used from | 
| David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 74 | * sched/ and workqueue.c. | 
| Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 75 | */ | 
| David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 76 | void wq_worker_running(struct task_struct *task); | 
|  | 77 | void wq_worker_sleeping(struct task_struct *task); | 
|  | 78 | work_func_t wq_worker_last_func(struct task_struct *task); | 
| Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 79 |  | 
|  | 80 | #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ |