/* SPDX-License-Identifier: GPL-2.0 */
#ifndef IOPRIO_H
#define IOPRIO_H

#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/iocontext.h>

/*
 * Gives us 8 prio classes with 13 bits of data for each class
 */
#define IOPRIO_CLASS_SHIFT (13)
#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)

#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))

#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
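
/*
 * For example, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4) packs class 2 and
 * priority level 4 into (2 << 13) | 4 = 0x4004, and IOPRIO_PRIO_CLASS()
 * and IOPRIO_PRIO_DATA() recover the two halves of that value.
 */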

/*
 * These are the I/O priority groups as implemented by CFQ. RT is the
 * realtime class; it always gets premium service. BE is the best-effort
 * scheduling class, the default for any process. IDLE is the idle
 * scheduling class; it is only served when no one else is using the disk.
 */
enum {
	IOPRIO_CLASS_NONE,
	IOPRIO_CLASS_RT,
	IOPRIO_CLASS_BE,
	IOPRIO_CLASS_IDLE,
};

/*
 * 8 best-effort priority levels are supported
 */
#define IOPRIO_BE_NR (8)

enum {
	IOPRIO_WHO_PROCESS = 1,
	IOPRIO_WHO_PGRP,
	IOPRIO_WHO_USER,
};

/*
 * Fallback BE priority
 */
#define IOPRIO_NORM (4)

/*
 * If the process has set an I/O priority explicitly, use that. If not,
 * convert the CPU scheduler nice value to an I/O priority.
 */
static inline int task_nice_ioprio(struct task_struct *task)
{
	return (task_nice(task) + 20) / 5;
}
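
/*
 * For example, the default nice value 0 maps to (0 + 20) / 5 = 4, which is
 * IOPRIO_NORM; nice -20 maps to 0 (the highest best-effort level) and
 * nice 19 maps to 7 (the lowest).
 */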

/*
 * This is for the case where the task hasn't asked for a specific I/O class.
 * Check for idle and realtime tasks, and return the appropriate I/O class.
 */
static inline int task_nice_ioclass(struct task_struct *task)
{
	if (task->policy == SCHED_IDLE)
		return IOPRIO_CLASS_IDLE;
	else if (task_is_realtime(task))
		return IOPRIO_CLASS_RT;
	else
		return IOPRIO_CLASS_BE;
}
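
/*
 * For example, a SCHED_FIFO or SCHED_RR task takes the realtime branch above
 * and is mapped to IOPRIO_CLASS_RT, while an ordinary SCHED_NORMAL task ends
 * up in IOPRIO_CLASS_BE.
 */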

/*
 * If the calling process has set an I/O priority, use that. Otherwise, return
 * the default I/O priority.
 */
static inline int get_current_ioprio(void)
{
	struct io_context *ioc = current->io_context;

	if (ioc)
		return ioc->ioprio;
	return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
}
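
/*
 * Note that the fallback IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0) evaluates to
 * 0, since IOPRIO_CLASS_NONE is the zero-valued class; callers can detect it
 * with ioprio_valid().
 */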

/*
 * For inheritance, return the higher of the two given priorities
 */
extern int ioprio_best(unsigned short aprio, unsigned short bprio);
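
/*
 * For example (illustrative; the exact rules live in the ioprio_best()
 * implementation), inheriting between an IOPRIO_CLASS_RT priority and an
 * IOPRIO_CLASS_BE priority is expected to yield the realtime one, since RT
 * outranks BE.
 */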

extern int set_task_ioprio(struct task_struct *task, int ioprio);

#ifdef CONFIG_BLOCK
extern int ioprio_check_cap(int ioprio);
#else
static inline int ioprio_check_cap(int ioprio)
{
	return -ENOTBLK;
}
#endif /* CONFIG_BLOCK */

#endif