/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/*
 * Interface for the pm_qos_power infrastructure of the Linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE	(-1)
#define PM_QOS_LATENCY_ANY	S32_MAX
#define PM_QOS_LATENCY_ANY_NS	((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT	PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS	PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,
	DEV_PM_QOS_LATENCY_TOLERANCE,
	DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN,		/* return the smallest value */
	PM_QOS_SUM		/* return the sum */
};

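/*
 * For example, a PM_QOS_MIN constraint set such as resume latency
 * resolves to the smallest request: requests of 100 and 300 usecs give
 * an effective target of 100 usecs. PM_QOS_MAX resolves to the largest
 * request and PM_QOS_SUM to the total of all requests.
 */
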
/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically. Atomic access is only guaranteed on all
 * CPU types Linux supports for 32-bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	s32 no_constraint_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};

struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};

struct dev_pm_qos {
	struct pm_qos_constraints resume_latency;
	struct pm_qos_constraints latency_tolerance;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *resume_latency_req;
	struct dev_pm_qos_request *latency_tolerance_req;
	struct dev_pm_qos_request *flags_req;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

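/*
 * Minimal usage sketch (illustrative; my_qos_req and the values are
 * hypothetical): a driver bounding CPU DMA latency around an I/O burst.
 * The request object must stay allocated until it is removed.
 *
 *	static struct pm_qos_request my_qos_req;	// hypothetical name
 *
 *	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 100);
 *	// ... latency-sensitive work runs with a 100 usec bound ...
 *	pm_qos_update_request(&my_qos_req, 20);	// tighten to 20 usecs
 *	// or apply 20 usecs for at most 500 usecs, then fall back
 *	// to the class default automatically:
 *	pm_qos_update_request_timeout(&my_qos_req, 20, 500);
 *	pm_qos_remove_request(&my_qos_req);
 */
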
int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);

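/*
 * Sketch of watching a constraint class (illustrative; my_qos_notify and
 * my_qos_nb are hypothetical). The notifier chain is called when the
 * aggregate target value of the class changes.
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		// react to the new aggregate CPU DMA latency target
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &my_qos_nb);
 *	// ... later ...
 *	pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY, &my_qos_nb);
 */
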
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_resume_latency(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);

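/*
 * Per-device sketch (illustrative; "child", lat_req and flags_req are
 * hypothetical): cap a device's acceptable resume latency and keep its
 * power domain from being powered off via PM_QOS_FLAG_NO_POWER_OFF.
 *
 *	static struct dev_pm_qos_request lat_req, flags_req;
 *
 *	dev_pm_qos_add_request(child, &lat_req,
 *			       DEV_PM_QOS_RESUME_LATENCY, 100);
 *	dev_pm_qos_add_request(child, &flags_req,
 *			       DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
 *	// ... device must be resumable within ~100 usecs here ...
 *	dev_pm_qos_remove_request(&flags_req);
 *	dev_pm_qos_remove_request(&lat_req);
 */
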
static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
	return dev->power.qos->resume_latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
	return dev->power.qos->flags_req->data.flr.flags;
}

static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
	return IS_ERR_OR_NULL(dev->power.qos) ?
		PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
		pm_qos_read_value(&dev->power.qos->resume_latency);
}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							  s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_resume_latency(struct device *dev)
			{ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline s32 dev_pm_qos_read_value(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	default:
		WARN_ON(1);
		return 0;
	}
}

static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier,
					  enum dev_pm_qos_req_type type)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier,
					     enum dev_pm_qos_req_type type)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  enum dev_pm_qos_req_type type,
						  s32 value)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
	return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
	return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
#endif

#define FREQ_QOS_MIN_DEFAULT_VALUE	0
#define FREQ_QOS_MAX_DEFAULT_VALUE	(-1)

enum freq_qos_req_type {
	FREQ_QOS_MIN = 1,
	FREQ_QOS_MAX,
};

struct freq_constraints {
	struct pm_qos_constraints min_freq;
	struct blocking_notifier_head min_freq_notifiers;
	struct pm_qos_constraints max_freq;
	struct blocking_notifier_head max_freq_notifiers;
};

struct freq_qos_request {
	enum freq_qos_req_type type;
	struct plist_node pnode;
	struct freq_constraints *qos;
};

static inline int freq_qos_request_active(struct freq_qos_request *req)
{
	return !IS_ERR_OR_NULL(req->qos);
}

void freq_constraints_init(struct freq_constraints *qos);

s32 freq_qos_read_value(struct freq_constraints *qos,
			enum freq_qos_req_type type);

int freq_qos_add_request(struct freq_constraints *qos,
			 struct freq_qos_request *req,
			 enum freq_qos_req_type type, s32 value);
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
int freq_qos_remove_request(struct freq_qos_request *req);

int freq_qos_add_notifier(struct freq_constraints *qos,
			  enum freq_qos_req_type type,
			  struct notifier_block *notifier);
int freq_qos_remove_notifier(struct freq_constraints *qos,
			     enum freq_qos_req_type type,
			     struct notifier_block *notifier);
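
/*
 * Frequency QoS sketch (illustrative; "constraints" and min_req are
 * hypothetical, and the values use kHz as cpufreq does, though the API
 * itself is unit-agnostic): placing a minimum-frequency request on a
 * constraints object owned by e.g. a cpufreq policy.
 *
 *	static struct freq_constraints constraints;
 *	static struct freq_qos_request min_req;
 *
 *	freq_constraints_init(&constraints);
 *	freq_qos_add_request(&constraints, &min_req, FREQ_QOS_MIN, 800000);
 *	// the effective floor is the highest of all FREQ_QOS_MIN requests:
 *	freq_qos_read_value(&constraints, FREQ_QOS_MIN);
 *	freq_qos_update_request(&min_req, 1200000);
 *	freq_qos_remove_request(&min_req);
 */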

#endif